diff -Nru lxd-2.9.1/debian/changelog lxd-2.9.2/debian/changelog --- lxd-2.9.1/debian/changelog 2017-02-17 21:12:32.000000000 +0000 +++ lxd-2.9.2/debian/changelog 2017-02-21 05:11:01.000000000 +0000 @@ -1,3 +1,31 @@ +lxd (2.9.2-0ubuntu1) zesty; urgency=medium + + * New upstream bugfix release (2.9.2): + - lxd/containers: Add fun to detect root disk device + - lxd/containers: Ensure proper root disk device + - lxd/containers: Helper to retrieve pool from devices + - lxd/containers: Path may only be used by one disk + - lxd/init: Fix regressions caused by storage work + - lxd/init: Small fixes + - lxd/migration: Call helper to detect valid storage pool + - lxd/migration: Fix moving containers with storage api + - lxd/patches: Handle partial upgrades + pool fixes + - lxd/patches: Handle partial upgrades + pool fixes + - lxd/patches: Improve btrfs upgrade + - lxd/patches: Improve dir upgrade + - lxd/patches: Only rerun pool updates + - lxd/profiles: Verify root disk devices + - lxd/storage/btrfs: Enable quotas on the pools we create + - lxd/storage/dir: Delete image from database + - Makefile: Always include gorilla/context + - Makefile: Drop repeated calls to "go get" + - tests: Add lxd init --auto tests + - tests: Add test for root disk devices in profiles + - tests: Execute tests based on available tools + - tests: Fix mixed tab/spaces again + + -- Stéphane Graber Tue, 21 Feb 2017 00:11:01 -0500 + lxd (2.9.1-0ubuntu3) zesty; urgency=medium * Re-introduce gorilla/context as it's needed for backports (Go < 1.7) diff -Nru lxd-2.9.1/debian/.git-dpm lxd-2.9.2/debian/.git-dpm --- lxd-2.9.1/debian/.git-dpm 2017-02-17 21:12:32.000000000 +0000 +++ lxd-2.9.2/debian/.git-dpm 2017-02-21 05:10:41.000000000 +0000 @@ -1,8 +1,8 @@ # see git-dpm(1) from git-dpm package -6c745ab235772aabc3a6da13c6585db53c3b466e -6c745ab235772aabc3a6da13c6585db53c3b466e -6a723d9d9974e2eab46d85c92de2707393d74ee6 -6a723d9d9974e2eab46d85c92de2707393d74ee6 -lxd_2.9.1.orig.tar.gz -a7d52600425715412b631170ade0f84ca01e596b -5247472 +185e3c538b39424957ef562d8f05db14777fd7f0 +185e3c538b39424957ef562d8f05db14777fd7f0 +185e3c538b39424957ef562d8f05db14777fd7f0 +185e3c538b39424957ef562d8f05db14777fd7f0 +lxd_2.9.2.orig.tar.gz +10af0d0ab93f963276455073168b9ff0f18288ba +5257370 diff -Nru lxd-2.9.1/debian/patches/0001-tests-Fix-mixed-tab-spaces-again.patch lxd-2.9.2/debian/patches/0001-tests-Fix-mixed-tab-spaces-again.patch --- lxd-2.9.1/debian/patches/0001-tests-Fix-mixed-tab-spaces-again.patch 2017-02-17 21:12:32.000000000 +0000 +++ lxd-2.9.2/debian/patches/0001-tests-Fix-mixed-tab-spaces-again.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,105 +0,0 @@ -From 6e3f879aa7abd765d5510ca5347367e74aaa1b56 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?St=C3=A9phane=20Graber?= -Date: Thu, 16 Feb 2017 13:12:24 -0500 -Subject: tests: Fix mixed tab/spaces again -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Signed-off-by: Stéphane Graber ---- - test/suites/storage.sh | 58 +++++++++++++++++++++++++------------------------- - 1 file changed, 29 insertions(+), 29 deletions(-) - -diff --git a/test/suites/storage.sh b/test/suites/storage.sh -index 20d2c5e..e678a24 100644 ---- a/test/suites/storage.sh -+++ b/test/suites/storage.sh -@@ -128,18 +128,18 @@ test_storage() { - lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool1" "${BACKEND}" - - if [ "${BACKEND}" = "zfs" ]; then -- # Let LXD use an already existing dataset. 
-- zfs create -p -o mountpoint=none "lxdtest-$(basename "${LXD_DIR}")-pool1/existing-dataset-as-pool" -- lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool7" "${BACKEND}" source="lxdtest-$(basename "${LXD_DIR}")-pool1/existing-dataset-as-pool" -- -- # Let LXD use an already existing storage pool. -- configure_loop_device loop_file_4 loop_device_4 -- # shellcheck disable=SC2154 -- zpool create "lxdtest-$(basename "${LXD_DIR}")-pool9-existing-pool" "${loop_device_4}" -f -m none -O compression=on -- lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool9" "${BACKEND}" source="lxdtest-$(basename "${LXD_DIR}")-pool9-existing-pool" -- -- # Let LXD create a new dataset and use as pool. -- lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool8" "${BACKEND}" source="lxdtest-$(basename "${LXD_DIR}")-pool1/non-existing-dataset-as-pool" -+ # Let LXD use an already existing dataset. -+ zfs create -p -o mountpoint=none "lxdtest-$(basename "${LXD_DIR}")-pool1/existing-dataset-as-pool" -+ lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool7" "${BACKEND}" source="lxdtest-$(basename "${LXD_DIR}")-pool1/existing-dataset-as-pool" -+ -+ # Let LXD use an already existing storage pool. -+ configure_loop_device loop_file_4 loop_device_4 -+ # shellcheck disable=SC2154 -+ zpool create "lxdtest-$(basename "${LXD_DIR}")-pool9-existing-pool" "${loop_device_4}" -f -m none -O compression=on -+ lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool9" "${BACKEND}" source="lxdtest-$(basename "${LXD_DIR}")-pool9-existing-pool" -+ -+ # Let LXD create a new dataset and use as pool. -+ lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool8" "${BACKEND}" source="lxdtest-$(basename "${LXD_DIR}")-pool1/non-existing-dataset-as-pool" - fi - - # Create device backed zfs pool -@@ -201,14 +201,14 @@ test_storage() { - lxc list -c b c12pool6 | grep "lxdtest-$(basename "${LXD_DIR}")-pool6" - - if [ "${BACKEND}" = "zfs" ]; then -- lxc launch testimage c13pool7 -s "lxdtest-$(basename "${LXD_DIR}")-pool7" -- lxc launch testimage c14pool7 -s "lxdtest-$(basename "${LXD_DIR}")-pool7" -+ lxc launch testimage c13pool7 -s "lxdtest-$(basename "${LXD_DIR}")-pool7" -+ lxc launch testimage c14pool7 -s "lxdtest-$(basename "${LXD_DIR}")-pool7" - -- lxc launch testimage c15pool8 -s "lxdtest-$(basename "${LXD_DIR}")-pool8" -- lxc launch testimage c16pool8 -s "lxdtest-$(basename "${LXD_DIR}")-pool8" -+ lxc launch testimage c15pool8 -s "lxdtest-$(basename "${LXD_DIR}")-pool8" -+ lxc launch testimage c16pool8 -s "lxdtest-$(basename "${LXD_DIR}")-pool8" - -- lxc launch testimage c17pool9 -s "lxdtest-$(basename "${LXD_DIR}")-pool9" -- lxc launch testimage c18pool9 -s "lxdtest-$(basename "${LXD_DIR}")-pool9" -+ lxc launch testimage c17pool9 -s "lxdtest-$(basename "${LXD_DIR}")-pool9" -+ lxc launch testimage c18pool9 -s "lxdtest-$(basename "${LXD_DIR}")-pool9" - fi - - lxc delete -f c1pool1 -@@ -230,24 +230,24 @@ test_storage() { - lxc delete -f c12pool6 - - if [ "${BACKEND}" = "zfs" ]; then -- lxc delete -f c13pool7 -- lxc delete -f c14pool7 -+ lxc delete -f c13pool7 -+ lxc delete -f c14pool7 - -- lxc delete -f c15pool8 -- lxc delete -f c16pool8 -+ lxc delete -f c15pool8 -+ lxc delete -f c16pool8 - -- lxc delete -f c17pool9 -- lxc delete -f c18pool9 -+ lxc delete -f c17pool9 -+ lxc delete -f c18pool9 - fi - - lxc image delete testimage - - if [ "${BACKEND}" = "zfs" ]; then -- lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool7" -- lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool8" -- lxc storage delete 
"lxdtest-$(basename "${LXD_DIR}")-pool9" -- # shellcheck disable=SC2154 -- deconfigure_loop_device "${loop_file_4}" "${loop_device_4}" -+ lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool7" -+ lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool8" -+ lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool9" -+ # shellcheck disable=SC2154 -+ deconfigure_loop_device "${loop_file_4}" "${loop_device_4}" - fi - - lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool2" diff -Nru lxd-2.9.1/debian/patches/0002-init-Fix-regressions-caused-by-storage-work.patch lxd-2.9.2/debian/patches/0002-init-Fix-regressions-caused-by-storage-work.patch --- lxd-2.9.1/debian/patches/0002-init-Fix-regressions-caused-by-storage-work.patch 2017-02-17 21:12:32.000000000 +0000 +++ lxd-2.9.2/debian/patches/0002-init-Fix-regressions-caused-by-storage-work.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,105 +0,0 @@ -From 5bdeb31b97d0bde775025b86b9530ab726da11bd Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?St=C3=A9phane=20Graber?= -Date: Thu, 16 Feb 2017 15:11:16 -0500 -Subject: init: Fix regressions caused by storage work -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Signed-off-by: Stéphane Graber ---- - lxd/main.go | 2 +- - lxd/main_init.go | 25 ++++++++++++++----------- - 2 files changed, 15 insertions(+), 12 deletions(-) - -diff --git a/lxd/main.go b/lxd/main.go -index 52f4972..6d4bd47 100644 ---- a/lxd/main.go -+++ b/lxd/main.go -@@ -26,7 +26,7 @@ var argPrintGoroutinesEvery = gnuflag.Int("print-goroutines-every", -1, "") - var argStorageBackend = gnuflag.String("storage-backend", "", "") - var argStorageCreateDevice = gnuflag.String("storage-create-device", "", "") - var argStorageCreateLoop = gnuflag.Int64("storage-create-loop", -1, "") --var argStoragePool = gnuflag.String("storage-pool", "", "") -+var argStorageDataset = gnuflag.String("storage-pool", "", "") - var argSyslog = gnuflag.Bool("syslog", false, "") - var argTimeout = gnuflag.Int("timeout", -1, "") - var argTrustPassword = gnuflag.String("trust-password", "", "") -diff --git a/lxd/main_init.go b/lxd/main_init.go -index 0063bd3..d831113 100644 ---- a/lxd/main_init.go -+++ b/lxd/main_init.go -@@ -23,6 +23,7 @@ func cmdInit() error { - var storageLoopSize int64 // Size in GB - var storageDevice string // Path - var storagePool string // pool name -+ var storageDataset string // existing ZFS pool name - var networkAddress string // Address - var networkPort int64 // Port - var trustPassword string // Trust password -@@ -192,7 +193,7 @@ func cmdInit() error { - } - - if *argStorageBackend == "dir" { -- if *argStorageCreateLoop != -1 || *argStorageCreateDevice != "" || *argStoragePool != "" { -+ if *argStorageCreateLoop != -1 || *argStorageCreateDevice != "" || *argStorageDataset != "" { - return fmt.Errorf("None of --storage-pool, --storage-create-device or --storage-create-loop may be used with the 'dir' backend.") - } - } -@@ -202,7 +203,7 @@ func cmdInit() error { - return fmt.Errorf("Only one of --storage-create-device or --storage-create-loop can be specified with the 'zfs' backend.") - } - -- if *argStoragePool == "" { -+ if *argStorageDataset == "" { - return fmt.Errorf("--storage-pool must be specified with the 'zfs' backend.") - } - } -@@ -219,13 +220,14 @@ func cmdInit() error { - storageBackend = *argStorageBackend - storageLoopSize = *argStorageCreateLoop - storageDevice = *argStorageCreateDevice -- storagePool = *argStoragePool -+ storageDataset = *argStorageDataset - 
networkAddress = *argNetworkAddress - networkPort = *argNetworkPort - trustPassword = *argTrustPassword -+ storagePool = "default" - storageSetup = true - } else { -- if *argStorageBackend != "" || *argStorageCreateDevice != "" || *argStorageCreateLoop != -1 || *argStoragePool != "" || *argNetworkAddress != "" || *argNetworkPort != -1 || *argTrustPassword != "" { -+ if *argStorageBackend != "" || *argStorageCreateDevice != "" || *argStorageCreateLoop != -1 || *argStorageDataset != "" || *argNetworkAddress != "" || *argNetworkPort != -1 || *argTrustPassword != "" { - return fmt.Errorf("Init configuration is only valid with --auto") - } - -@@ -278,7 +280,7 @@ func cmdInit() error { - storageLoopSize = askInt(q, 1, -1, fmt.Sprintf("%d", def)) - } - } else { -- storagePool = askString("Name of the existing ZFS pool or dataset: ", "", nil) -+ storageDataset = askString("Name of the existing ZFS pool or dataset: ", "", nil) - } - } - -@@ -360,14 +362,15 @@ they otherwise would. - } - } - -- // Destroy any existing loop device -- for _, file := range []string{"zfs.img"} { -- os.Remove(shared.VarPath(file)) -+ // Pool configuration -+ storageConfig := map[string]string{} -+ if storageDevice != "" { -+ storageConfig["source"] = storageDevice -+ } else if storageDataset != "" { -+ storageConfig["source"] = storageDataset - } - -- storageConfig := map[string]string{} -- storageConfig["source"] = storageDevice -- if storageBackend != "dir" { -+ if storageBackend != "dir" && storageLoopSize != 0 { - storageConfig["size"] = strconv.FormatInt(storageLoopSize, 10) + "GB" - } - diff -Nru lxd-2.9.1/debian/patches/0003-lxd-main_init-small-fixes.patch lxd-2.9.2/debian/patches/0003-lxd-main_init-small-fixes.patch --- lxd-2.9.1/debian/patches/0003-lxd-main_init-small-fixes.patch 2017-02-17 21:12:32.000000000 +0000 +++ lxd-2.9.2/debian/patches/0003-lxd-main_init-small-fixes.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -From de5ad2596ad6bd0ab95cbdd24bac1246762ec84f Mon Sep 17 00:00:00 2001 -From: Christian Brauner -Date: Thu, 16 Feb 2017 21:21:40 +0100 -Subject: lxd/main_init: small fixes - -Signed-off-by: Christian Brauner ---- - lxd/main_init.go | 19 +++++++++++++------ - 1 file changed, 13 insertions(+), 6 deletions(-) - -diff --git a/lxd/main_init.go b/lxd/main_init.go -index d831113..6fe86c4 100644 ---- a/lxd/main_init.go -+++ b/lxd/main_init.go -@@ -202,10 +202,6 @@ func cmdInit() error { - if *argStorageCreateLoop != -1 && *argStorageCreateDevice != "" { - return fmt.Errorf("Only one of --storage-create-device or --storage-create-loop can be specified with the 'zfs' backend.") - } -- -- if *argStorageDataset == "" { -- return fmt.Errorf("--storage-pool must be specified with the 'zfs' backend.") -- } - } - - if *argNetworkAddress == "" { -@@ -251,6 +247,7 @@ func cmdInit() error { - } - - if storageSetup && storageBackend == "zfs" { -+ storageLoopSize = -1 - if askBool("Create a new ZFS pool (yes/no) [default=yes]? ", "yes") { - if askBool("Would you like to use an existing block device (yes/no) [default=no]? ", "no") { - deviceExists := func(path string) error { -@@ -366,11 +363,21 @@ they otherwise would. - storageConfig := map[string]string{} - if storageDevice != "" { - storageConfig["source"] = storageDevice -- } else if storageDataset != "" { -+ // The user probably wants to give the storage pool a -+ // custom name. 
-+ if storageDataset != "" { -+ storagePool = storageDataset -+ } -+ } else if storageDataset != "" && storageBackend == "zfs" && storageLoopSize < 0 { - storageConfig["source"] = storageDataset - } - -- if storageBackend != "dir" && storageLoopSize != 0 { -+ if storageBackend != "dir" && storageLoopSize > 0 { -+ // The user probably wants to give the storage pool a -+ // custom name. -+ if storageDataset != "" { -+ storagePool = storageDataset -+ } - storageConfig["size"] = strconv.FormatInt(storageLoopSize, 10) + "GB" - } - diff -Nru lxd-2.9.1/debian/patches/0004-test-add-lxd-init-auto-tests.patch lxd-2.9.2/debian/patches/0004-test-add-lxd-init-auto-tests.patch --- lxd-2.9.1/debian/patches/0004-test-add-lxd-init-auto-tests.patch 2017-02-17 21:12:32.000000000 +0000 +++ lxd-2.9.2/debian/patches/0004-test-add-lxd-init-auto-tests.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,342 +0,0 @@ -From ae72471cef24fd7cab4c98b2454d3e17ae4e80a7 Mon Sep 17 00:00:00 2001 -From: Christian Brauner -Date: Thu, 16 Feb 2017 23:11:55 +0100 -Subject: test: add lxd init --auto tests - -Signed-off-by: Christian Brauner ---- - test/main.sh | 108 +++++++++++++++++++++++++++++++++++++++++++++++++ - test/suites/init.sh | 81 +++++++++++++++++++++++++++++++++++++ - test/suites/storage.sh | 107 ------------------------------------------------ - 3 files changed, 189 insertions(+), 107 deletions(-) - create mode 100644 test/suites/init.sh - -diff --git a/test/main.sh b/test/main.sh -index d2e176e..be3c592 100755 ---- a/test/main.sh -+++ b/test/main.sh -@@ -397,6 +397,113 @@ wipe() { - rm -Rf "${1}" - } - -+configure_lvm_loop_device() { -+ lv_loop_file=$(mktemp -p "${TEST_DIR}" XXXX.lvm) -+ truncate -s 4G "${lv_loop_file}" -+ pvloopdev=$(losetup --show -f "${lv_loop_file}") -+ if [ ! -e "${pvloopdev}" ]; then -+ echo "failed to setup loop" -+ false -+ fi -+ -+ pvcreate "${pvloopdev}" -+ -+ # The following code enables to return a value from a shell function by -+ # calling the function as: fun VAR1 -+ -+ # shellcheck disable=2039 -+ local __tmp1="${1}" -+ # shellcheck disable=2039 -+ local res1="${lv_loop_file}" -+ if [ "${__tmp1}" ]; then -+ eval "${__tmp1}='${res1}'" -+ fi -+ -+ # shellcheck disable=2039 -+ local __tmp2="${2}" -+ # shellcheck disable=2039 -+ local res2="${pvloopdev}" -+ if [ "${__tmp2}" ]; then -+ eval "${__tmp2}='${res2}'" -+ fi -+} -+ -+deconfigure_lvm_loop_device() { -+ lv_loop_file="${1}" -+ loopdev="${2}" -+ -+ SUCCESS=0 -+ # shellcheck disable=SC2034 -+ for i in $(seq 10); do -+ pvremove -f "${loopdev}" > /dev/null 2>&1 || true -+ if losetup -d "${loopdev}"; then -+ SUCCESS=1 -+ break -+ fi -+ -+ sleep 0.5 -+ done -+ -+ if [ "${SUCCESS}" = "0" ]; then -+ echo "Failed to tear down loop device." -+ false -+ fi -+ -+ rm -f "${lv_loop_file}" -+} -+ -+configure_loop_device() { -+ lv_loop_file=$(mktemp -p "${TEST_DIR}" XXXX.img) -+ truncate -s 10G "${lv_loop_file}" -+ pvloopdev=$(losetup --show -f "${lv_loop_file}") -+ if [ ! 
-e "${pvloopdev}" ]; then -+ echo "failed to setup loop" -+ false -+ fi -+ -+ # The following code enables to return a value from a shell function by -+ # calling the function as: fun VAR1 -+ -+ # shellcheck disable=2039 -+ local __tmp1="${1}" -+ # shellcheck disable=2039 -+ local res1="${lv_loop_file}" -+ if [ "${__tmp1}" ]; then -+ eval "${__tmp1}='${res1}'" -+ fi -+ -+ # shellcheck disable=2039 -+ local __tmp2="${2}" -+ # shellcheck disable=2039 -+ local res2="${pvloopdev}" -+ if [ "${__tmp2}" ]; then -+ eval "${__tmp2}='${res2}'" -+ fi -+} -+ -+deconfigure_loop_device() { -+ lv_loop_file="${1}" -+ loopdev="${2}" -+ -+ SUCCESS=0 -+ # shellcheck disable=SC2034 -+ for i in $(seq 10); do -+ if losetup -d "${loopdev}"; then -+ SUCCESS=1 -+ break -+ fi -+ -+ sleep 0.5 -+ done -+ -+ if [ "${SUCCESS}" = "0" ]; then -+ echo "Failed to tear down loop device" -+ false -+ fi -+ -+ rm -f "${lv_loop_file}" -+} -+ - # Must be set before cleanup() - TEST_CURRENT=setup - TEST_RESULT=failure -@@ -479,5 +586,6 @@ run_test test_fdleak "fd leak" - run_test test_cpu_profiling "CPU profiling" - run_test test_mem_profiling "memory profiling" - run_test test_storage "storage" -+run_test test_lxd_autoinit "lxd init auto" - - TEST_RESULT=success -diff --git a/test/suites/init.sh b/test/suites/init.sh -new file mode 100644 -index 0000000..8a41846 ---- /dev/null -+++ b/test/suites/init.sh -@@ -0,0 +1,81 @@ -+#!/bin/sh -+ -+test_lxd_autoinit() { -+ # lxd init --auto -+ LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) -+ chmod +x "${LXD_INIT_DIR}" -+ spawn_lxd "${LXD_INIT_DIR}" false -+ -+ ZFS_POOL="lxdtest-$(basename "${LXD_DIR}")-init" -+ LXD_DIR=${LXD_INIT_DIR} lxd init --auto -+ -+ kill_lxd "${LXD_INIT_DIR}" -+ -+ # lxd init --auto --storage-backend zfs -+ if [ "${LXD_BACKEND}" = "zfs" ]; then -+ LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) -+ chmod +x "${LXD_INIT_DIR}" -+ spawn_lxd "${LXD_INIT_DIR}" false -+ -+ LXD_DIR=${LXD_INIT_DIR} lxd init --auto --storage-backend zfs -+ -+ kill_lxd "${LXD_INIT_DIR}" -+ fi -+ -+ # lxd init --auto --storage-backend zfs --storage-pool -+ if [ "${LXD_BACKEND}" = "zfs" ]; then -+ LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) -+ chmod +x "${LXD_INIT_DIR}" -+ spawn_lxd "${LXD_INIT_DIR}" false -+ -+ configure_loop_device loop_file_1 loop_device_1 -+ zpool create "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" "${loop_device_1}" -m none -O compression=on -+ LXD_DIR=${LXD_INIT_DIR} lxd init --auto --storage-backend zfs --storage-pool "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" -+ -+ kill_lxd "${LXD_INIT_DIR}" -+ deconfigure_loop_device "${loop_file_1}" "${loop_device_1}" -+ fi -+ -+ # lxd init --auto --storage-backend zfs --storage-pool / -+ if [ "${LXD_BACKEND}" = "zfs" ]; then -+ LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) -+ chmod +x "${LXD_INIT_DIR}" -+ spawn_lxd "${LXD_INIT_DIR}" false -+ -+ configure_loop_device loop_file_1 loop_device_1 -+ zpool create "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" "${loop_device_1}" -m none -O compression=on -+ LXD_DIR=${LXD_INIT_DIR} lxd init --auto --storage-backend zfs --storage-pool "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool/non-existing-dataset" -+ -+ kill_lxd "${LXD_INIT_DIR}" -+ deconfigure_loop_device "${loop_file_1}" "${loop_device_1}" -+ zpool destroy -f "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" -+ fi -+ -+ # lxd init --auto --storage-backend zfs --storage-pool / -+ if [ "${LXD_BACKEND}" = "zfs" ]; then -+ LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) -+ chmod +x 
"${LXD_INIT_DIR}" -+ spawn_lxd "${LXD_INIT_DIR}" false -+ -+ configure_loop_device loop_file_1 loop_device_1 -+ zpool create "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" "${loop_device_1}" -f -m none -O compression=on -+ zfs create -p -o mountpoint=none "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool/existing-dataset" -+ LXD_DIR=${LXD_INIT_DIR} lxd init --auto --storage-backend zfs --storage-pool "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool/existing-dataset" -+ -+ kill_lxd "${LXD_INIT_DIR}" -+ deconfigure_loop_device "${loop_file_1}" "${loop_device_1}" -+ zpool destroy -f "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" -+ fi -+ -+ # lxd init --storage-backend zfs --storage-create-loop 1 --storage-pool --auto -+ if [ "${LXD_BACKEND}" = "zfs" ]; then -+ LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) -+ chmod +x "${LXD_INIT_DIR}" -+ spawn_lxd "${LXD_INIT_DIR}" false -+ -+ ZFS_POOL="lxdtest-$(basename "${LXD_DIR}")-init" -+ LXD_DIR=${LXD_INIT_DIR} lxd init --storage-backend zfs --storage-create-loop 1 --storage-pool "${ZFS_POOL}" --auto -+ -+ kill_lxd "${LXD_INIT_DIR}" -+ fi -+} -diff --git a/test/suites/storage.sh b/test/suites/storage.sh -index e678a24..ad8eead 100644 ---- a/test/suites/storage.sh -+++ b/test/suites/storage.sh -@@ -1,112 +1,5 @@ - #!/bin/sh - --configure_lvm_loop_device() { -- lv_loop_file=$(mktemp -p "${TEST_DIR}" XXXX.lvm) -- truncate -s 4G "${lv_loop_file}" -- pvloopdev=$(losetup --show -f "${lv_loop_file}") -- if [ ! -e "${pvloopdev}" ]; then -- echo "failed to setup loop" -- false -- fi -- -- pvcreate "${pvloopdev}" -- -- # The following code enables to return a value from a shell function by -- # calling the function as: fun VAR1 -- -- # shellcheck disable=2039 -- local __tmp1="${1}" -- # shellcheck disable=2039 -- local res1="${lv_loop_file}" -- if [ "${__tmp1}" ]; then -- eval "${__tmp1}='${res1}'" -- fi -- -- # shellcheck disable=2039 -- local __tmp2="${2}" -- # shellcheck disable=2039 -- local res2="${pvloopdev}" -- if [ "${__tmp2}" ]; then -- eval "${__tmp2}='${res2}'" -- fi --} -- --deconfigure_lvm_loop_device() { -- lv_loop_file="${1}" -- loopdev="${2}" -- -- SUCCESS=0 -- # shellcheck disable=SC2034 -- for i in $(seq 10); do -- pvremove -f "${loopdev}" > /dev/null 2>&1 || true -- if losetup -d "${loopdev}"; then -- SUCCESS=1 -- break -- fi -- -- sleep 0.5 -- done -- -- if [ "${SUCCESS}" = "0" ]; then -- echo "Failed to tear down loop device." -- false -- fi -- -- rm -f "${lv_loop_file}" --} -- --configure_loop_device() { -- lv_loop_file=$(mktemp -p "${TEST_DIR}" XXXX.img) -- truncate -s 10G "${lv_loop_file}" -- pvloopdev=$(losetup --show -f "${lv_loop_file}") -- if [ ! 
-e "${pvloopdev}" ]; then -- echo "failed to setup loop" -- false -- fi -- -- # The following code enables to return a value from a shell function by -- # calling the function as: fun VAR1 -- -- # shellcheck disable=2039 -- local __tmp1="${1}" -- # shellcheck disable=2039 -- local res1="${lv_loop_file}" -- if [ "${__tmp1}" ]; then -- eval "${__tmp1}='${res1}'" -- fi -- -- # shellcheck disable=2039 -- local __tmp2="${2}" -- # shellcheck disable=2039 -- local res2="${pvloopdev}" -- if [ "${__tmp2}" ]; then -- eval "${__tmp2}='${res2}'" -- fi --} -- --deconfigure_loop_device() { -- lv_loop_file="${1}" -- loopdev="${2}" -- -- SUCCESS=0 -- # shellcheck disable=SC2034 -- for i in $(seq 10); do -- if losetup -d "${loopdev}"; then -- SUCCESS=1 -- break -- fi -- -- sleep 0.5 -- done -- -- if [ "${SUCCESS}" = "0" ]; then -- echo "Failed to tear down loop device" -- false -- fi -- -- rm -f "${lv_loop_file}" --} -- - test_storage() { - LXD_STORAGE_DIR=$(mktemp -d -p "${TEST_DIR}" XXXXXXXXX) - chmod +x "${LXD_STORAGE_DIR}" diff -Nru lxd-2.9.1/debian/patches/0005-test-execute-tests-based-on-available-tools.patch lxd-2.9.2/debian/patches/0005-test-execute-tests-based-on-available-tools.patch --- lxd-2.9.1/debian/patches/0005-test-execute-tests-based-on-available-tools.patch 2017-02-17 21:12:32.000000000 +0000 +++ lxd-2.9.2/debian/patches/0005-test-execute-tests-based-on-available-tools.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,309 +0,0 @@ -From 31c03b98bc1c9f72b6d16366719480b0b8539fa3 Mon Sep 17 00:00:00 2001 -From: Christian Brauner -Date: Thu, 16 Feb 2017 23:35:13 +0100 -Subject: test: execute tests based on available tools - -Signed-off-by: Christian Brauner ---- - test/suites/init.sh | 7 ++ - test/suites/storage.sh | 171 +++++++++++++++++++++++++++---------------------- - 2 files changed, 102 insertions(+), 76 deletions(-) - -diff --git a/test/suites/init.sh b/test/suites/init.sh -index 8a41846..77756b6 100644 ---- a/test/suites/init.sh -+++ b/test/suites/init.sh -@@ -28,11 +28,14 @@ test_lxd_autoinit() { - chmod +x "${LXD_INIT_DIR}" - spawn_lxd "${LXD_INIT_DIR}" false - -+ # shellcheck disable=SC2154 - configure_loop_device loop_file_1 loop_device_1 -+ # shellcheck disable=SC2154 - zpool create "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" "${loop_device_1}" -m none -O compression=on - LXD_DIR=${LXD_INIT_DIR} lxd init --auto --storage-backend zfs --storage-pool "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" - - kill_lxd "${LXD_INIT_DIR}" -+ # shellcheck disable=SC2154 - deconfigure_loop_device "${loop_file_1}" "${loop_device_1}" - fi - -@@ -43,11 +46,13 @@ test_lxd_autoinit() { - spawn_lxd "${LXD_INIT_DIR}" false - - configure_loop_device loop_file_1 loop_device_1 -+ # shellcheck disable=SC2154 - zpool create "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" "${loop_device_1}" -m none -O compression=on - LXD_DIR=${LXD_INIT_DIR} lxd init --auto --storage-backend zfs --storage-pool "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool/non-existing-dataset" - - kill_lxd "${LXD_INIT_DIR}" - deconfigure_loop_device "${loop_file_1}" "${loop_device_1}" -+ # shellcheck disable=SC2154 - zpool destroy -f "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" - fi - -@@ -58,12 +63,14 @@ test_lxd_autoinit() { - spawn_lxd "${LXD_INIT_DIR}" false - - configure_loop_device loop_file_1 loop_device_1 -+ # shellcheck disable=SC2154 - zpool create "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" "${loop_device_1}" -f -m none -O compression=on - zfs create -p -o mountpoint=none 
"lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool/existing-dataset" - LXD_DIR=${LXD_INIT_DIR} lxd init --auto --storage-backend zfs --storage-pool "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool/existing-dataset" - - kill_lxd "${LXD_INIT_DIR}" - deconfigure_loop_device "${loop_file_1}" "${loop_device_1}" -+ # shellcheck disable=SC2154 - zpool destroy -f "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" - fi - -diff --git a/test/suites/storage.sh b/test/suites/storage.sh -index ad8eead..4eddeaf 100644 ---- a/test/suites/storage.sh -+++ b/test/suites/storage.sh -@@ -1,6 +1,8 @@ - #!/bin/sh - - test_storage() { -+ # shellcheck disable=2039 -+ - LXD_STORAGE_DIR=$(mktemp -d -p "${TEST_DIR}" XXXXXXXXX) - chmod +x "${LXD_STORAGE_DIR}" - spawn_lxd "${LXD_STORAGE_DIR}" false -@@ -9,91 +11,98 @@ test_storage() { - # shellcheck disable=2030 - LXD_DIR="${LXD_STORAGE_DIR}" - -- # Only create zfs pools on 64 bit arches. I think getconf LONG_BIT should -- # even work when running a 32bit userspace on a 64 bit kernel. -- ARCH=$(getconf LONG_BIT) -- BACKEND=btrfs -- if [ "${ARCH}" = "64" ]; then -- BACKEND=zfs -- fi -- -+ # shellcheck disable=SC1009 -+ if which zfs >/dev/null 2>&1; then - # Create loop file zfs pool. -- lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool1" "${BACKEND}" -+ lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool1" zfs - -- if [ "${BACKEND}" = "zfs" ]; then - # Let LXD use an already existing dataset. - zfs create -p -o mountpoint=none "lxdtest-$(basename "${LXD_DIR}")-pool1/existing-dataset-as-pool" -- lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool7" "${BACKEND}" source="lxdtest-$(basename "${LXD_DIR}")-pool1/existing-dataset-as-pool" -+ lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool7" zfs source="lxdtest-$(basename "${LXD_DIR}")-pool1/existing-dataset-as-pool" - - # Let LXD use an already existing storage pool. - configure_loop_device loop_file_4 loop_device_4 - # shellcheck disable=SC2154 - zpool create "lxdtest-$(basename "${LXD_DIR}")-pool9-existing-pool" "${loop_device_4}" -f -m none -O compression=on -- lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool9" "${BACKEND}" source="lxdtest-$(basename "${LXD_DIR}")-pool9-existing-pool" -+ lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool9" zfs source="lxdtest-$(basename "${LXD_DIR}")-pool9-existing-pool" - - # Let LXD create a new dataset and use as pool. -- lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool8" "${BACKEND}" source="lxdtest-$(basename "${LXD_DIR}")-pool1/non-existing-dataset-as-pool" -- fi -+ lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool8" zfs source="lxdtest-$(basename "${LXD_DIR}")-pool1/non-existing-dataset-as-pool" - -- # Create device backed zfs pool -- configure_loop_device loop_file_1 loop_device_1 -- # shellcheck disable=SC2154 -- lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool2" "${BACKEND}" source="${loop_device_1}" -+ # Create device backed zfs pool -+ configure_loop_device loop_file_1 loop_device_1 -+ # shellcheck disable=SC2154 -+ lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool2" zfs source="${loop_device_1}" -+ fi - -- # Create loop file btrfs pool. -- lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool3" btrfs -+ if which btrfs >/dev/null 2>&1; then -+ # Create loop file btrfs pool. -+ lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool3" btrfs - -- # Create device backed btrfs pool. 
-- configure_loop_device loop_file_2 loop_device_2 -- # shellcheck disable=SC2154 -- lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool4" btrfs source="${loop_device_2}" -+ # Create device backed btrfs pool. -+ configure_loop_device loop_file_2 loop_device_2 -+ # shellcheck disable=SC2154 -+ lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool4" btrfs source="${loop_device_2}" -+ fi - - # Create dir pool. - lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool5" dir - -- # Create lvm pool. -- configure_lvm_loop_device loop_file_3 loop_device_3 -- # shellcheck disable=SC2154 -- lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool6" lvm source="${loop_device_3}" -+ if which lvdisplay >/dev/null 2>&1; then -+ # Create lvm pool. -+ configure_lvm_loop_device loop_file_3 loop_device_3 -+ # shellcheck disable=SC2154 -+ lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool6" lvm source="${loop_device_3}" -+ fi - - # Set default storage pool for image import. -- lxc profile device add default root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1" -+ lxc profile device add default root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool5" - - # Import image into default storage pool. - ensure_import_testimage - - # Muck around with some containers on various pools. -- lxc init testimage c1pool1 -s "lxdtest-$(basename "${LXD_DIR}")-pool1" -- lxc list -c b c1pool1 | grep "lxdtest-$(basename "${LXD_DIR}")-pool1" -- lxc init testimage c2pool2 -s "lxdtest-$(basename "${LXD_DIR}")-pool2" -- lxc list -c b c2pool2 | grep "lxdtest-$(basename "${LXD_DIR}")-pool2" -- -- lxc launch testimage c3pool1 -s "lxdtest-$(basename "${LXD_DIR}")-pool1" -- lxc list -c b c3pool1 | grep "lxdtest-$(basename "${LXD_DIR}")-pool1" -- lxc launch testimage c4pool2 -s "lxdtest-$(basename "${LXD_DIR}")-pool2" -- lxc list -c b c4pool2 | grep "lxdtest-$(basename "${LXD_DIR}")-pool2" -- -- lxc init testimage c5pool3 -s "lxdtest-$(basename "${LXD_DIR}")-pool3" -- lxc list -c b c5pool3 | grep "lxdtest-$(basename "${LXD_DIR}")-pool3" -- lxc init testimage c6pool4 -s "lxdtest-$(basename "${LXD_DIR}")-pool4" -- lxc list -c b c6pool4 | grep "lxdtest-$(basename "${LXD_DIR}")-pool4" -- -- lxc launch testimage c7pool3 -s "lxdtest-$(basename "${LXD_DIR}")-pool3" -- lxc list -c b c7pool3 | grep "lxdtest-$(basename "${LXD_DIR}")-pool3" -- lxc launch testimage c8pool4 -s "lxdtest-$(basename "${LXD_DIR}")-pool4" -- lxc list -c b c8pool4 | grep "lxdtest-$(basename "${LXD_DIR}")-pool4" -+ if which zfs >/dev/null 2>&1; then -+ lxc init testimage c1pool1 -s "lxdtest-$(basename "${LXD_DIR}")-pool1" -+ lxc list -c b c1pool1 | grep "lxdtest-$(basename "${LXD_DIR}")-pool1" -+ -+ lxc init testimage c2pool2 -s "lxdtest-$(basename "${LXD_DIR}")-pool2" -+ lxc list -c b c2pool2 | grep "lxdtest-$(basename "${LXD_DIR}")-pool2" -+ -+ lxc launch testimage c3pool1 -s "lxdtest-$(basename "${LXD_DIR}")-pool1" -+ lxc list -c b c3pool1 | grep "lxdtest-$(basename "${LXD_DIR}")-pool1" -+ -+ lxc launch testimage c4pool2 -s "lxdtest-$(basename "${LXD_DIR}")-pool2" -+ lxc list -c b c4pool2 | grep "lxdtest-$(basename "${LXD_DIR}")-pool2" -+ fi -+ -+ if which btrfs >/dev/null 2>&1; then -+ lxc init testimage c5pool3 -s "lxdtest-$(basename "${LXD_DIR}")-pool3" -+ lxc list -c b c5pool3 | grep "lxdtest-$(basename "${LXD_DIR}")-pool3" -+ lxc init testimage c6pool4 -s "lxdtest-$(basename "${LXD_DIR}")-pool4" -+ lxc list -c b c6pool4 | grep "lxdtest-$(basename "${LXD_DIR}")-pool4" -+ -+ lxc launch testimage c7pool3 -s "lxdtest-$(basename 
"${LXD_DIR}")-pool3" -+ lxc list -c b c7pool3 | grep "lxdtest-$(basename "${LXD_DIR}")-pool3" -+ lxc launch testimage c8pool4 -s "lxdtest-$(basename "${LXD_DIR}")-pool4" -+ lxc list -c b c8pool4 | grep "lxdtest-$(basename "${LXD_DIR}")-pool4" -+ fi - - lxc init testimage c9pool5 -s "lxdtest-$(basename "${LXD_DIR}")-pool5" - lxc list -c b c9pool5 | grep "lxdtest-$(basename "${LXD_DIR}")-pool5" -- lxc init testimage c10pool6 -s "lxdtest-$(basename "${LXD_DIR}")-pool6" -- lxc list -c b c10pool6 | grep "lxdtest-$(basename "${LXD_DIR}")-pool6" - - lxc launch testimage c11pool5 -s "lxdtest-$(basename "${LXD_DIR}")-pool5" - lxc list -c b c11pool5 | grep "lxdtest-$(basename "${LXD_DIR}")-pool5" -- lxc launch testimage c12pool6 -s "lxdtest-$(basename "${LXD_DIR}")-pool6" -- lxc list -c b c12pool6 | grep "lxdtest-$(basename "${LXD_DIR}")-pool6" - -- if [ "${BACKEND}" = "zfs" ]; then -+ if which lvdisplay >/dev/null 2>&1; then -+ lxc init testimage c10pool6 -s "lxdtest-$(basename "${LXD_DIR}")-pool6" -+ lxc list -c b c10pool6 | grep "lxdtest-$(basename "${LXD_DIR}")-pool6" -+ -+ lxc launch testimage c12pool6 -s "lxdtest-$(basename "${LXD_DIR}")-pool6" -+ lxc list -c b c12pool6 | grep "lxdtest-$(basename "${LXD_DIR}")-pool6" -+ fi -+ -+ if which zfs >/dev/null 2>&1; then - lxc launch testimage c13pool7 -s "lxdtest-$(basename "${LXD_DIR}")-pool7" - lxc launch testimage c14pool7 -s "lxdtest-$(basename "${LXD_DIR}")-pool7" - -@@ -104,25 +113,31 @@ test_storage() { - lxc launch testimage c18pool9 -s "lxdtest-$(basename "${LXD_DIR}")-pool9" - fi - -- lxc delete -f c1pool1 -- lxc delete -f c2pool2 -+ if which zfs >/dev/null 2>&1; then -+ lxc delete -f c1pool1 -+ lxc delete -f c3pool1 - -- lxc delete -f c3pool1 -- lxc delete -f c4pool2 -+ lxc delete -f c4pool2 -+ lxc delete -f c2pool2 -+ fi - -- lxc delete -f c5pool3 -- lxc delete -f c6pool4 -+ if which btrfs >/dev/null 2>&1; then -+ lxc delete -f c5pool3 -+ lxc delete -f c7pool3 - -- lxc delete -f c7pool3 -- lxc delete -f c8pool4 -+ lxc delete -f c8pool4 -+ lxc delete -f c6pool4 -+ fi - - lxc delete -f c9pool5 -- lxc delete -f c10pool6 -- - lxc delete -f c11pool5 -- lxc delete -f c12pool6 - -- if [ "${BACKEND}" = "zfs" ]; then -+ if which lvdisplay >/dev/null 2>&1; then -+ lxc delete -f c10pool6 -+ lxc delete -f c12pool6 -+ fi -+ -+ if which zfs >/dev/null 2>&1; then - lxc delete -f c13pool7 - lxc delete -f c14pool7 - -@@ -135,25 +150,29 @@ test_storage() { - - lxc image delete testimage - -- if [ "${BACKEND}" = "zfs" ]; then -+ if which zfs >/dev/null 2>&1; then - lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool7" - lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool8" - lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool9" - # shellcheck disable=SC2154 - deconfigure_loop_device "${loop_file_4}" "${loop_device_4}" -- fi - -- lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool2" -- # shellcheck disable=SC2154 -- deconfigure_loop_device "${loop_file_1}" "${loop_device_1}" -+ lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool2" -+ # shellcheck disable=SC2154 -+ deconfigure_loop_device "${loop_file_1}" "${loop_device_1}" -+ fi - -- lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool4" -- # shellcheck disable=SC2154 -- deconfigure_loop_device "${loop_file_2}" "${loop_device_2}" -+ if which btrfs >/dev/null 2>&1; then -+ lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool4" -+ # shellcheck disable=SC2154 -+ deconfigure_loop_device "${loop_file_2}" "${loop_device_2}" -+ fi - -- lxc storage delete "lxdtest-$(basename 
"${LXD_DIR}")-pool6" -- # shellcheck disable=SC2154 -- deconfigure_lvm_loop_device "${loop_file_3}" "${loop_device_3}" -+ if which lvdisplay >/dev/null 2>&1; then -+ lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool6" -+ # shellcheck disable=SC2154 -+ deconfigure_lvm_loop_device "${loop_file_3}" "${loop_device_3}" -+ fi - ) - - # shellcheck disable=SC2031 diff -Nru lxd-2.9.1/debian/patches/0006-Re-introduce-gorilla-context.patch lxd-2.9.2/debian/patches/0006-Re-introduce-gorilla-context.patch --- lxd-2.9.1/debian/patches/0006-Re-introduce-gorilla-context.patch 2017-02-17 21:12:32.000000000 +0000 +++ lxd-2.9.2/debian/patches/0006-Re-introduce-gorilla-context.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,508 +0,0 @@ -From 6c745ab235772aabc3a6da13c6585db53c3b466e Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?St=C3=A9phane=20Graber?= -Date: Fri, 17 Feb 2017 16:12:10 -0500 -Subject: Re-introduce gorilla/context -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Signed-off-by: Stéphane Graber ---- - dist/src/github.com/gorilla/context/.travis.yml | 19 +++ - dist/src/github.com/gorilla/context/LICENSE | 27 ++++ - dist/src/github.com/gorilla/context/README.md | 10 ++ - dist/src/github.com/gorilla/context/context.go | 143 ++++++++++++++++++ - .../src/github.com/gorilla/context/context_test.go | 161 +++++++++++++++++++++ - dist/src/github.com/gorilla/context/doc.go | 88 +++++++++++ - 6 files changed, 448 insertions(+) - create mode 100644 dist/src/github.com/gorilla/context/.travis.yml - create mode 100644 dist/src/github.com/gorilla/context/LICENSE - create mode 100644 dist/src/github.com/gorilla/context/README.md - create mode 100644 dist/src/github.com/gorilla/context/context.go - create mode 100644 dist/src/github.com/gorilla/context/context_test.go - create mode 100644 dist/src/github.com/gorilla/context/doc.go - -diff --git a/dist/src/github.com/gorilla/context/.travis.yml b/dist/src/github.com/gorilla/context/.travis.yml -new file mode 100644 -index 0000000..6f440f1 ---- /dev/null -+++ b/dist/src/github.com/gorilla/context/.travis.yml -@@ -0,0 +1,19 @@ -+language: go -+sudo: false -+ -+matrix: -+ include: -+ - go: 1.3 -+ - go: 1.4 -+ - go: 1.5 -+ - go: 1.6 -+ - go: 1.7 -+ - go: tip -+ allow_failures: -+ - go: tip -+ -+script: -+ - go get -t -v ./... -+ - diff -u <(echo -n) <(gofmt -d .) -+ - go vet $(go list ./... | grep -v /vendor/) -+ - go test -v -race ./... -diff --git a/dist/src/github.com/gorilla/context/LICENSE b/dist/src/github.com/gorilla/context/LICENSE -new file mode 100644 -index 0000000..0e5fb87 ---- /dev/null -+++ b/dist/src/github.com/gorilla/context/LICENSE -@@ -0,0 +1,27 @@ -+Copyright (c) 2012 Rodrigo Moraes. All rights reserved. -+ -+Redistribution and use in source and binary forms, with or without -+modification, are permitted provided that the following conditions are -+met: -+ -+ * Redistributions of source code must retain the above copyright -+notice, this list of conditions and the following disclaimer. -+ * Redistributions in binary form must reproduce the above -+copyright notice, this list of conditions and the following disclaimer -+in the documentation and/or other materials provided with the -+distribution. -+ * Neither the name of Google Inc. nor the names of its -+contributors may be used to endorse or promote products derived from -+this software without specific prior written permission. 
-+ -+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -diff --git a/dist/src/github.com/gorilla/context/README.md b/dist/src/github.com/gorilla/context/README.md -new file mode 100644 -index 0000000..08f8669 ---- /dev/null -+++ b/dist/src/github.com/gorilla/context/README.md -@@ -0,0 +1,10 @@ -+context -+======= -+[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) -+ -+gorilla/context is a general purpose registry for global request variables. -+ -+> Note: gorilla/context, having been born well before `context.Context` existed, does not play well -+> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`. -+ -+Read the full documentation here: http://www.gorillatoolkit.org/pkg/context -diff --git a/dist/src/github.com/gorilla/context/context.go b/dist/src/github.com/gorilla/context/context.go -new file mode 100644 -index 0000000..81cb128 ---- /dev/null -+++ b/dist/src/github.com/gorilla/context/context.go -@@ -0,0 +1,143 @@ -+// Copyright 2012 The Gorilla Authors. All rights reserved. -+// Use of this source code is governed by a BSD-style -+// license that can be found in the LICENSE file. -+ -+package context -+ -+import ( -+ "net/http" -+ "sync" -+ "time" -+) -+ -+var ( -+ mutex sync.RWMutex -+ data = make(map[*http.Request]map[interface{}]interface{}) -+ datat = make(map[*http.Request]int64) -+) -+ -+// Set stores a value for a given key in a given request. -+func Set(r *http.Request, key, val interface{}) { -+ mutex.Lock() -+ if data[r] == nil { -+ data[r] = make(map[interface{}]interface{}) -+ datat[r] = time.Now().Unix() -+ } -+ data[r][key] = val -+ mutex.Unlock() -+} -+ -+// Get returns a value stored for a given key in a given request. -+func Get(r *http.Request, key interface{}) interface{} { -+ mutex.RLock() -+ if ctx := data[r]; ctx != nil { -+ value := ctx[key] -+ mutex.RUnlock() -+ return value -+ } -+ mutex.RUnlock() -+ return nil -+} -+ -+// GetOk returns stored value and presence state like multi-value return of map access. -+func GetOk(r *http.Request, key interface{}) (interface{}, bool) { -+ mutex.RLock() -+ if _, ok := data[r]; ok { -+ value, ok := data[r][key] -+ mutex.RUnlock() -+ return value, ok -+ } -+ mutex.RUnlock() -+ return nil, false -+} -+ -+// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. 
-+func GetAll(r *http.Request) map[interface{}]interface{} { -+ mutex.RLock() -+ if context, ok := data[r]; ok { -+ result := make(map[interface{}]interface{}, len(context)) -+ for k, v := range context { -+ result[k] = v -+ } -+ mutex.RUnlock() -+ return result -+ } -+ mutex.RUnlock() -+ return nil -+} -+ -+// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if -+// the request was registered. -+func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { -+ mutex.RLock() -+ context, ok := data[r] -+ result := make(map[interface{}]interface{}, len(context)) -+ for k, v := range context { -+ result[k] = v -+ } -+ mutex.RUnlock() -+ return result, ok -+} -+ -+// Delete removes a value stored for a given key in a given request. -+func Delete(r *http.Request, key interface{}) { -+ mutex.Lock() -+ if data[r] != nil { -+ delete(data[r], key) -+ } -+ mutex.Unlock() -+} -+ -+// Clear removes all values stored for a given request. -+// -+// This is usually called by a handler wrapper to clean up request -+// variables at the end of a request lifetime. See ClearHandler(). -+func Clear(r *http.Request) { -+ mutex.Lock() -+ clear(r) -+ mutex.Unlock() -+} -+ -+// clear is Clear without the lock. -+func clear(r *http.Request) { -+ delete(data, r) -+ delete(datat, r) -+} -+ -+// Purge removes request data stored for longer than maxAge, in seconds. -+// It returns the amount of requests removed. -+// -+// If maxAge <= 0, all request data is removed. -+// -+// This is only used for sanity check: in case context cleaning was not -+// properly set some request data can be kept forever, consuming an increasing -+// amount of memory. In case this is detected, Purge() must be called -+// periodically until the problem is fixed. -+func Purge(maxAge int) int { -+ mutex.Lock() -+ count := 0 -+ if maxAge <= 0 { -+ count = len(data) -+ data = make(map[*http.Request]map[interface{}]interface{}) -+ datat = make(map[*http.Request]int64) -+ } else { -+ min := time.Now().Unix() - int64(maxAge) -+ for r := range data { -+ if datat[r] < min { -+ clear(r) -+ count++ -+ } -+ } -+ } -+ mutex.Unlock() -+ return count -+} -+ -+// ClearHandler wraps an http.Handler and clears request values at the end -+// of a request lifetime. -+func ClearHandler(h http.Handler) http.Handler { -+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { -+ defer Clear(r) -+ h.ServeHTTP(w, r) -+ }) -+} -diff --git a/dist/src/github.com/gorilla/context/context_test.go b/dist/src/github.com/gorilla/context/context_test.go -new file mode 100644 -index 0000000..d70e91a ---- /dev/null -+++ b/dist/src/github.com/gorilla/context/context_test.go -@@ -0,0 +1,161 @@ -+// Copyright 2012 The Gorilla Authors. All rights reserved. -+// Use of this source code is governed by a BSD-style -+// license that can be found in the LICENSE file. 
-+ -+package context -+ -+import ( -+ "net/http" -+ "testing" -+) -+ -+type keyType int -+ -+const ( -+ key1 keyType = iota -+ key2 -+) -+ -+func TestContext(t *testing.T) { -+ assertEqual := func(val interface{}, exp interface{}) { -+ if val != exp { -+ t.Errorf("Expected %v, got %v.", exp, val) -+ } -+ } -+ -+ r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) -+ emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil) -+ -+ // Get() -+ assertEqual(Get(r, key1), nil) -+ -+ // Set() -+ Set(r, key1, "1") -+ assertEqual(Get(r, key1), "1") -+ assertEqual(len(data[r]), 1) -+ -+ Set(r, key2, "2") -+ assertEqual(Get(r, key2), "2") -+ assertEqual(len(data[r]), 2) -+ -+ //GetOk -+ value, ok := GetOk(r, key1) -+ assertEqual(value, "1") -+ assertEqual(ok, true) -+ -+ value, ok = GetOk(r, "not exists") -+ assertEqual(value, nil) -+ assertEqual(ok, false) -+ -+ Set(r, "nil value", nil) -+ value, ok = GetOk(r, "nil value") -+ assertEqual(value, nil) -+ assertEqual(ok, true) -+ -+ // GetAll() -+ values := GetAll(r) -+ assertEqual(len(values), 3) -+ -+ // GetAll() for empty request -+ values = GetAll(emptyR) -+ if values != nil { -+ t.Error("GetAll didn't return nil value for invalid request") -+ } -+ -+ // GetAllOk() -+ values, ok = GetAllOk(r) -+ assertEqual(len(values), 3) -+ assertEqual(ok, true) -+ -+ // GetAllOk() for empty request -+ values, ok = GetAllOk(emptyR) -+ assertEqual(len(values), 0) -+ assertEqual(ok, false) -+ -+ // Delete() -+ Delete(r, key1) -+ assertEqual(Get(r, key1), nil) -+ assertEqual(len(data[r]), 2) -+ -+ Delete(r, key2) -+ assertEqual(Get(r, key2), nil) -+ assertEqual(len(data[r]), 1) -+ -+ // Clear() -+ Clear(r) -+ assertEqual(len(data), 0) -+} -+ -+func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) { -+ <-wait -+ for i := 0; i < iterations; i++ { -+ Get(r, key) -+ } -+ done <- struct{}{} -+ -+} -+ -+func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { -+ <-wait -+ for i := 0; i < iterations; i++ { -+ Set(r, key, value) -+ } -+ done <- struct{}{} -+ -+} -+ -+func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) { -+ -+ b.StopTimer() -+ r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) -+ done := make(chan struct{}) -+ b.StartTimer() -+ -+ for i := 0; i < b.N; i++ { -+ wait := make(chan struct{}) -+ -+ for i := 0; i < numReaders; i++ { -+ go parallelReader(r, "test", iterations, wait, done) -+ } -+ -+ for i := 0; i < numWriters; i++ { -+ go parallelWriter(r, "test", "123", iterations, wait, done) -+ } -+ -+ close(wait) -+ -+ for i := 0; i < numReaders+numWriters; i++ { -+ <-done -+ } -+ -+ } -+ -+} -+ -+func BenchmarkMutexSameReadWrite1(b *testing.B) { -+ benchmarkMutex(b, 1, 1, 32) -+} -+func BenchmarkMutexSameReadWrite2(b *testing.B) { -+ benchmarkMutex(b, 2, 2, 32) -+} -+func BenchmarkMutexSameReadWrite4(b *testing.B) { -+ benchmarkMutex(b, 4, 4, 32) -+} -+func BenchmarkMutex1(b *testing.B) { -+ benchmarkMutex(b, 2, 8, 32) -+} -+func BenchmarkMutex2(b *testing.B) { -+ benchmarkMutex(b, 16, 4, 64) -+} -+func BenchmarkMutex3(b *testing.B) { -+ benchmarkMutex(b, 1, 2, 128) -+} -+func BenchmarkMutex4(b *testing.B) { -+ benchmarkMutex(b, 128, 32, 256) -+} -+func BenchmarkMutex5(b *testing.B) { -+ benchmarkMutex(b, 1024, 2048, 64) -+} -+func BenchmarkMutex6(b *testing.B) { -+ benchmarkMutex(b, 2048, 1024, 512) -+} -diff --git a/dist/src/github.com/gorilla/context/doc.go b/dist/src/github.com/gorilla/context/doc.go -new file mode 100644 
-index 0000000..448d1bf ---- /dev/null -+++ b/dist/src/github.com/gorilla/context/doc.go -@@ -0,0 +1,88 @@ -+// Copyright 2012 The Gorilla Authors. All rights reserved. -+// Use of this source code is governed by a BSD-style -+// license that can be found in the LICENSE file. -+ -+/* -+Package context stores values shared during a request lifetime. -+ -+Note: gorilla/context, having been born well before `context.Context` existed, -+does not play well > with the shallow copying of the request that -+[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) -+(added to net/http Go 1.7 onwards) performs. You should either use *just* -+gorilla/context, or moving forward, the new `http.Request.Context()`. -+ -+For example, a router can set variables extracted from the URL and later -+application handlers can access those values, or it can be used to store -+sessions values to be saved at the end of a request. There are several -+others common uses. -+ -+The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: -+ -+ http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 -+ -+Here's the basic usage: first define the keys that you will need. The key -+type is interface{} so a key can be of any type that supports equality. -+Here we define a key using a custom int type to avoid name collisions: -+ -+ package foo -+ -+ import ( -+ "github.com/gorilla/context" -+ ) -+ -+ type key int -+ -+ const MyKey key = 0 -+ -+Then set a variable. Variables are bound to an http.Request object, so you -+need a request instance to set a value: -+ -+ context.Set(r, MyKey, "bar") -+ -+The application can later access the variable using the same key you provided: -+ -+ func MyHandler(w http.ResponseWriter, r *http.Request) { -+ // val is "bar". -+ val := context.Get(r, foo.MyKey) -+ -+ // returns ("bar", true) -+ val, ok := context.GetOk(r, foo.MyKey) -+ // ... -+ } -+ -+And that's all about the basic usage. We discuss some other ideas below. -+ -+Any type can be stored in the context. To enforce a given type, make the key -+private and wrap Get() and Set() to accept and return values of a specific -+type: -+ -+ type key int -+ -+ const mykey key = 0 -+ -+ // GetMyKey returns a value for this package from the request values. -+ func GetMyKey(r *http.Request) SomeType { -+ if rv := context.Get(r, mykey); rv != nil { -+ return rv.(SomeType) -+ } -+ return nil -+ } -+ -+ // SetMyKey sets a value for this package in the request values. -+ func SetMyKey(r *http.Request, val SomeType) { -+ context.Set(r, mykey, val) -+ } -+ -+Variables must be cleared at the end of a request, to remove all values -+that were stored. This can be done in an http.Handler, after a request was -+served. Just call Clear() passing the request: -+ -+ context.Clear(r) -+ -+...or use ClearHandler(), which conveniently wraps an http.Handler to clear -+variables at the end of a request lifetime. -+ -+The Routers from the packages gorilla/mux and gorilla/pat call Clear() -+so if you are using either of them you don't need to clear the context manually. 
-+*/ -+package context diff -Nru lxd-2.9.1/debian/patches/series lxd-2.9.2/debian/patches/series --- lxd-2.9.1/debian/patches/series 2017-02-17 21:12:32.000000000 +0000 +++ lxd-2.9.2/debian/patches/series 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -0001-tests-Fix-mixed-tab-spaces-again.patch -0002-init-Fix-regressions-caused-by-storage-work.patch -0003-lxd-main_init-small-fixes.patch -0004-test-add-lxd-init-auto-tests.patch -0005-test-execute-tests-based-on-available-tools.patch -0006-Re-introduce-gorilla-context.patch diff -Nru lxd-2.9.1/dist/src/github.com/golang/protobuf/descriptor/descriptor.go lxd-2.9.2/dist/src/github.com/golang/protobuf/descriptor/descriptor.go --- lxd-2.9.1/dist/src/github.com/golang/protobuf/descriptor/descriptor.go 2017-02-16 17:27:47.000000000 +0000 +++ lxd-2.9.2/dist/src/github.com/golang/protobuf/descriptor/descriptor.go 2017-02-21 04:43:11.000000000 +0000 @@ -43,7 +43,7 @@ "io/ioutil" "github.com/golang/protobuf/proto" - protobuf "google.golang.org/genproto/protobuf" + protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" ) // extractFile extracts a FileDescriptorProto from a gzip'd buffer. diff -Nru lxd-2.9.1/dist/src/github.com/golang/protobuf/descriptor/descriptor_test.go lxd-2.9.2/dist/src/github.com/golang/protobuf/descriptor/descriptor_test.go --- lxd-2.9.1/dist/src/github.com/golang/protobuf/descriptor/descriptor_test.go 2017-02-16 17:27:47.000000000 +0000 +++ lxd-2.9.2/dist/src/github.com/golang/protobuf/descriptor/descriptor_test.go 2017-02-21 04:43:11.000000000 +0000 @@ -6,7 +6,7 @@ "github.com/golang/protobuf/descriptor" tpb "github.com/golang/protobuf/proto/testdata" - protobuf "google.golang.org/genproto/protobuf" + protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" ) func TestMessage(t *testing.T) { diff -Nru lxd-2.9.1/dist/src/github.com/gorilla/context/context.go lxd-2.9.2/dist/src/github.com/gorilla/context/context.go --- lxd-2.9.1/dist/src/github.com/gorilla/context/context.go 1970-01-01 00:00:00.000000000 +0000 +++ lxd-2.9.2/dist/src/github.com/gorilla/context/context.go 2017-02-21 04:43:21.000000000 +0000 @@ -0,0 +1,143 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "net/http" + "sync" + "time" +) + +var ( + mutex sync.RWMutex + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) +) + +// Set stores a value for a given key in a given request. +func Set(r *http.Request, key, val interface{}) { + mutex.Lock() + if data[r] == nil { + data[r] = make(map[interface{}]interface{}) + datat[r] = time.Now().Unix() + } + data[r][key] = val + mutex.Unlock() +} + +// Get returns a value stored for a given key in a given request. +func Get(r *http.Request, key interface{}) interface{} { + mutex.RLock() + if ctx := data[r]; ctx != nil { + value := ctx[key] + mutex.RUnlock() + return value + } + mutex.RUnlock() + return nil +} + +// GetOk returns stored value and presence state like multi-value return of map access. +func GetOk(r *http.Request, key interface{}) (interface{}, bool) { + mutex.RLock() + if _, ok := data[r]; ok { + value, ok := data[r][key] + mutex.RUnlock() + return value, ok + } + mutex.RUnlock() + return nil, false +} + +// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. 
+func GetAll(r *http.Request) map[interface{}]interface{} { + mutex.RLock() + if context, ok := data[r]; ok { + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result + } + mutex.RUnlock() + return nil +} + +// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if +// the request was registered. +func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { + mutex.RLock() + context, ok := data[r] + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result, ok +} + +// Delete removes a value stored for a given key in a given request. +func Delete(r *http.Request, key interface{}) { + mutex.Lock() + if data[r] != nil { + delete(data[r], key) + } + mutex.Unlock() +} + +// Clear removes all values stored for a given request. +// +// This is usually called by a handler wrapper to clean up request +// variables at the end of a request lifetime. See ClearHandler(). +func Clear(r *http.Request) { + mutex.Lock() + clear(r) + mutex.Unlock() +} + +// clear is Clear without the lock. +func clear(r *http.Request) { + delete(data, r) + delete(datat, r) +} + +// Purge removes request data stored for longer than maxAge, in seconds. +// It returns the number of requests removed. +// +// If maxAge <= 0, all request data is removed. +// +// This is only used as a sanity check: if context clearing was not +// properly set up, some request data can be kept forever, consuming an +// increasing amount of memory. If this is detected, Purge() must be called +// periodically until the problem is fixed. +func Purge(maxAge int) int { + mutex.Lock() + count := 0 + if maxAge <= 0 { + count = len(data) + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) + } else { + min := time.Now().Unix() - int64(maxAge) + for r := range data { + if datat[r] < min { + clear(r) + count++ + } + } + } + mutex.Unlock() + return count +} + +// ClearHandler wraps an http.Handler and clears request values at the end +// of a request lifetime. +func ClearHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer Clear(r) + h.ServeHTTP(w, r) + }) +} diff -Nru lxd-2.9.1/dist/src/github.com/gorilla/context/context_test.go lxd-2.9.2/dist/src/github.com/gorilla/context/context_test.go --- lxd-2.9.1/dist/src/github.com/gorilla/context/context_test.go 1970-01-01 00:00:00.000000000 +0000 +++ lxd-2.9.2/dist/src/github.com/gorilla/context/context_test.go 2017-02-21 04:43:21.000000000 +0000 @@ -0,0 +1,161 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package context + +import ( + "net/http" + "testing" +) + +type keyType int + +const ( + key1 keyType = iota + key2 +) + +func TestContext(t *testing.T) { + assertEqual := func(val interface{}, exp interface{}) { + if val != exp { + t.Errorf("Expected %v, got %v.", exp, val) + } + } + + r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + + // Get() + assertEqual(Get(r, key1), nil) + + // Set() + Set(r, key1, "1") + assertEqual(Get(r, key1), "1") + assertEqual(len(data[r]), 1) + + Set(r, key2, "2") + assertEqual(Get(r, key2), "2") + assertEqual(len(data[r]), 2) + + //GetOk + value, ok := GetOk(r, key1) + assertEqual(value, "1") + assertEqual(ok, true) + + value, ok = GetOk(r, "not exists") + assertEqual(value, nil) + assertEqual(ok, false) + + Set(r, "nil value", nil) + value, ok = GetOk(r, "nil value") + assertEqual(value, nil) + assertEqual(ok, true) + + // GetAll() + values := GetAll(r) + assertEqual(len(values), 3) + + // GetAll() for empty request + values = GetAll(emptyR) + if values != nil { + t.Error("GetAll didn't return nil value for invalid request") + } + + // GetAllOk() + values, ok = GetAllOk(r) + assertEqual(len(values), 3) + assertEqual(ok, true) + + // GetAllOk() for empty request + values, ok = GetAllOk(emptyR) + assertEqual(len(values), 0) + assertEqual(ok, false) + + // Delete() + Delete(r, key1) + assertEqual(Get(r, key1), nil) + assertEqual(len(data[r]), 2) + + Delete(r, key2) + assertEqual(Get(r, key2), nil) + assertEqual(len(data[r]), 1) + + // Clear() + Clear(r) + assertEqual(len(data), 0) +} + +func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) { + <-wait + for i := 0; i < iterations; i++ { + Get(r, key) + } + done <- struct{}{} + +} + +func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { + <-wait + for i := 0; i < iterations; i++ { + Set(r, key, value) + } + done <- struct{}{} + +} + +func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) { + + b.StopTimer() + r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + done := make(chan struct{}) + b.StartTimer() + + for i := 0; i < b.N; i++ { + wait := make(chan struct{}) + + for i := 0; i < numReaders; i++ { + go parallelReader(r, "test", iterations, wait, done) + } + + for i := 0; i < numWriters; i++ { + go parallelWriter(r, "test", "123", iterations, wait, done) + } + + close(wait) + + for i := 0; i < numReaders+numWriters; i++ { + <-done + } + + } + +} + +func BenchmarkMutexSameReadWrite1(b *testing.B) { + benchmarkMutex(b, 1, 1, 32) +} +func BenchmarkMutexSameReadWrite2(b *testing.B) { + benchmarkMutex(b, 2, 2, 32) +} +func BenchmarkMutexSameReadWrite4(b *testing.B) { + benchmarkMutex(b, 4, 4, 32) +} +func BenchmarkMutex1(b *testing.B) { + benchmarkMutex(b, 2, 8, 32) +} +func BenchmarkMutex2(b *testing.B) { + benchmarkMutex(b, 16, 4, 64) +} +func BenchmarkMutex3(b *testing.B) { + benchmarkMutex(b, 1, 2, 128) +} +func BenchmarkMutex4(b *testing.B) { + benchmarkMutex(b, 128, 32, 256) +} +func BenchmarkMutex5(b *testing.B) { + benchmarkMutex(b, 1024, 2048, 64) +} +func BenchmarkMutex6(b *testing.B) { + benchmarkMutex(b, 2048, 1024, 512) +} diff -Nru lxd-2.9.1/dist/src/github.com/gorilla/context/doc.go lxd-2.9.2/dist/src/github.com/gorilla/context/doc.go --- lxd-2.9.1/dist/src/github.com/gorilla/context/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ lxd-2.9.2/dist/src/github.com/gorilla/context/doc.go 2017-02-21 
04:43:21.000000000 +0000 @@ -0,0 +1,88 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package context stores values shared during a request lifetime. + +Note: gorilla/context, having been born well before `context.Context` existed, +does not play well with the shallow copying of the request that +[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) +(added to net/http in Go 1.7) performs. You should either use *just* +gorilla/context, or moving forward, the new `http.Request.Context()`. + +For example, a router can set variables extracted from the URL and later +application handlers can access those values, or it can be used to store +session values to be saved at the end of a request. There are several +other common uses. + +The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: + + http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 + +Here's the basic usage: first define the keys that you will need. The key +type is interface{} so a key can be of any type that supports equality. +Here we define a key using a custom int type to avoid name collisions: + + package foo + + import ( + "github.com/gorilla/context" + ) + + type key int + + const MyKey key = 0 + +Then set a variable. Variables are bound to an http.Request object, so you +need a request instance to set a value: + + context.Set(r, MyKey, "bar") + +The application can later access the variable using the same key you provided: + + func MyHandler(w http.ResponseWriter, r *http.Request) { + // val is "bar". + val := context.Get(r, foo.MyKey) + + // returns ("bar", true) + val, ok := context.GetOk(r, foo.MyKey) + // ... + } + +And that's all for the basic usage. We discuss some other ideas below. + +Any type can be stored in the context. To enforce a given type, make the key +private and wrap Get() and Set() to accept and return values of a specific +type: + + type key int + + const mykey key = 0 + + // GetMyKey returns a value for this package from the request values. + func GetMyKey(r *http.Request) SomeType { + if rv := context.Get(r, mykey); rv != nil { + return rv.(SomeType) + } + return nil + } + + // SetMyKey sets a value for this package in the request values. + func SetMyKey(r *http.Request, val SomeType) { + context.Set(r, mykey, val) + } + +Variables must be cleared at the end of a request, to remove all values +that were stored. This can be done in an http.Handler, after a request was +served. Just call Clear() passing the request: + + context.Clear(r) + +...or use ClearHandler(), which conveniently wraps an http.Handler to clear +variables at the end of a request lifetime. + +The Routers from the packages gorilla/mux and gorilla/pat call Clear(), +so if you are using either of them you don't need to clear the context manually. +*/ +package context diff -Nru lxd-2.9.1/dist/src/github.com/gorilla/context/LICENSE lxd-2.9.2/dist/src/github.com/gorilla/context/LICENSE --- lxd-2.9.1/dist/src/github.com/gorilla/context/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ lxd-2.9.2/dist/src/github.com/gorilla/context/LICENSE 2017-02-21 04:43:21.000000000 +0000 @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru lxd-2.9.1/dist/src/github.com/gorilla/context/README.md lxd-2.9.2/dist/src/github.com/gorilla/context/README.md --- lxd-2.9.1/dist/src/github.com/gorilla/context/README.md 1970-01-01 00:00:00.000000000 +0000 +++ lxd-2.9.2/dist/src/github.com/gorilla/context/README.md 2017-02-21 04:43:21.000000000 +0000 @@ -0,0 +1,10 @@ +context +======= +[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) + +gorilla/context is a general purpose registry for global request variables. + +> Note: gorilla/context, having been born well before `context.Context` existed, does not play well +> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff -Nru lxd-2.9.1/dist/src/github.com/gorilla/context/.travis.yml lxd-2.9.2/dist/src/github.com/gorilla/context/.travis.yml --- lxd-2.9.1/dist/src/github.com/gorilla/context/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ lxd-2.9.2/dist/src/github.com/gorilla/context/.travis.yml 2017-02-21 04:43:21.000000000 +0000 @@ -0,0 +1,19 @@ +language: go +sudo: false + +matrix: + include: + - go: 1.3 + - go: 1.4 + - go: 1.5 + - go: 1.6 + - go: 1.7 + - go: tip + allow_failures: + - go: tip + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go vet $(go list ./... | grep -v /vendor/) + - go test -v -race ./... 
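The gorilla/context API vendored above composes as follows; this is a minimal usage sketch based only on the doc.go documentation (the key type, handler name and listen address are illustrative, not part of the LXD tree):

    package main

    import (
        "fmt"
        "net/http"

        "github.com/gorilla/context"
    )

    // An unexported key type avoids collisions with keys set by other packages.
    type ctxKey int

    const userKey ctxKey = 0

    func handler(w http.ResponseWriter, r *http.Request) {
        // A router or middleware would normally have called Set earlier.
        context.Set(r, userKey, "alice")

        if v, ok := context.GetOk(r, userKey); ok {
            fmt.Fprintf(w, "user: %v\n", v)
        }
    }

    func main() {
        // ClearHandler drops all values bound to the request once it has
        // been served; without it the package-level map grows until
        // Purge() is called.
        http.Handle("/", context.ClearHandler(http.HandlerFunc(handler)))
        http.ListenAndServe(":8080", nil)
    }

Note that the gorilla/mux router vendored in this same tree calls Clear() itself, so the ClearHandler wrapper is only needed when serving with plain net/http.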
diff -Nru lxd-2.9.1/dist/src/github.com/gorilla/mux/.travis.yml lxd-2.9.2/dist/src/github.com/gorilla/mux/.travis.yml --- lxd-2.9.1/dist/src/github.com/gorilla/mux/.travis.yml 2017-02-16 17:27:48.000000000 +0000 +++ lxd-2.9.2/dist/src/github.com/gorilla/mux/.travis.yml 2017-02-21 04:43:12.000000000 +0000 @@ -9,6 +9,7 @@ - go: 1.5 - go: 1.6 - go: 1.7 + - go: 1.8 - go: tip install: diff -Nru lxd-2.9.1/dist/src/github.com/gorilla/websocket/client_clone.go lxd-2.9.2/dist/src/github.com/gorilla/websocket/client_clone.go --- lxd-2.9.1/dist/src/github.com/gorilla/websocket/client_clone.go 1970-01-01 00:00:00.000000000 +0000 +++ lxd-2.9.2/dist/src/github.com/gorilla/websocket/client_clone.go 2017-02-21 04:43:03.000000000 +0000 @@ -0,0 +1,16 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package websocket + +import "crypto/tls" + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return cfg.Clone() +} diff -Nru lxd-2.9.1/dist/src/github.com/gorilla/websocket/client_clone_legacy.go lxd-2.9.2/dist/src/github.com/gorilla/websocket/client_clone_legacy.go --- lxd-2.9.1/dist/src/github.com/gorilla/websocket/client_clone_legacy.go 1970-01-01 00:00:00.000000000 +0000 +++ lxd-2.9.2/dist/src/github.com/gorilla/websocket/client_clone_legacy.go 2017-02-21 04:43:03.000000000 +0000 @@ -0,0 +1,38 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package websocket + +import "crypto/tls" + +// cloneTLSConfig clones all public fields except the fields +// SessionTicketsDisabled and SessionTicketKey. This avoids copying the +// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a +// config in active use. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff -Nru lxd-2.9.1/dist/src/github.com/gorilla/websocket/client.go lxd-2.9.2/dist/src/github.com/gorilla/websocket/client.go --- lxd-2.9.1/dist/src/github.com/gorilla/websocket/client.go 2017-02-16 17:27:40.000000000 +0000 +++ lxd-2.9.2/dist/src/github.com/gorilla/websocket/client.go 2017-02-21 04:43:03.000000000 +0000 @@ -389,32 +389,3 @@ netConn = nil // to avoid close in defer. return conn, resp, nil } - -// cloneTLSConfig clones all public fields except the fields -// SessionTicketsDisabled and SessionTicketKey. This avoids copying the -// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a -// config in active use. 
-func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } -} diff -Nru lxd-2.9.1/dist/src/github.com/gorilla/websocket/.travis.yml lxd-2.9.2/dist/src/github.com/gorilla/websocket/.travis.yml --- lxd-2.9.1/dist/src/github.com/gorilla/websocket/.travis.yml 2017-02-16 17:27:40.000000000 +0000 +++ lxd-2.9.2/dist/src/github.com/gorilla/websocket/.travis.yml 2017-02-21 04:43:03.000000000 +0000 @@ -7,6 +7,7 @@ - go: 1.5 - go: 1.6 - go: 1.7 + - go: 1.8 - go: tip allow_failures: - go: tip diff -Nru lxd-2.9.1/dist/src/github.com/mattn/go-colorable/colorable_windows.go lxd-2.9.2/dist/src/github.com/mattn/go-colorable/colorable_windows.go --- lxd-2.9.1/dist/src/github.com/mattn/go-colorable/colorable_windows.go 2017-02-16 17:27:43.000000000 +0000 +++ lxd-2.9.2/dist/src/github.com/mattn/go-colorable/colorable_windows.go 2017-02-21 04:43:06.000000000 +0000 @@ -411,14 +411,12 @@ buf.Write([]byte(string(c))) } - var csbi consoleScreenBufferInfo switch m { case 'A': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.y -= short(n) procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'B': @@ -426,7 +424,6 @@ if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.y += short(n) procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'C': @@ -434,7 +431,6 @@ if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x -= short(n) procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'D': @@ -444,7 +440,6 @@ } if n, err = strconv.Atoi(buf.String()); err == nil { var csbi consoleScreenBufferInfo - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x += short(n) procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) } @@ -453,7 +448,6 @@ if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = 0 csbi.cursorPosition.y += short(n) procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) @@ -462,7 +456,6 @@ if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = 0 csbi.cursorPosition.y -= short(n) procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) @@ -471,50 +464,66 @@ if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) 
csbi.cursorPosition.x = short(n - 1) procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'H': - token := strings.Split(buf.String(), ";") - if len(token) != 2 { - continue - } - n1, err := strconv.Atoi(token[0]) - if err != nil { - continue - } - n2, err := strconv.Atoi(token[1]) - if err != nil { - continue + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + panic(1) + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 } - csbi.cursorPosition.x = short(n2 - 1) - csbi.cursorPosition.y = short(n1 - 1) procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'J': - n, err := strconv.Atoi(buf.String()) - if err != nil { - continue + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + var count, written dword var cursor coord switch n { case 0: cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) case 1: cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.window.top-csbi.cursorPosition.y)*csbi.size.x) case 2: cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) } - var count, written dword - count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) case 'K': - n, err := strconv.Atoi(buf.String()) - if err != nil { - continue + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) var cursor coord switch n { case 0: @@ -529,7 +538,6 @@ procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) case 'm': - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) attr := csbi.attributes cs := buf.String() if cs == "" { @@ -650,13 +658,13 @@ procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) } case 's': - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) w.oldpos = csbi.cursorPosition case 'u': procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) } } - return len(data) - w.lastbuf.Len(), nil + + return 
len(data), nil } type consoleColor struct { diff -Nru lxd-2.9.1/dist/src/golang.org/x/net/lif/syscall.go lxd-2.9.2/dist/src/golang.org/x/net/lif/syscall.go --- lxd-2.9.1/dist/src/golang.org/x/net/lif/syscall.go 2017-02-16 17:27:53.000000000 +0000 +++ lxd-2.9.2/dist/src/golang.org/x/net/lif/syscall.go 2017-02-21 04:43:16.000000000 +0000 @@ -19,13 +19,8 @@ func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) -// TODO: replace with runtime.KeepAlive when available -//go:noescape -func keepAlive(p unsafe.Pointer) - func ioctl(s, ioc uintptr, arg unsafe.Pointer) error { _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procIoctl)), 3, s, ioc, uintptr(arg), 0, 0, 0) - keepAlive(arg) if errno != 0 { return error(errno) } diff -Nru lxd-2.9.1/dist/src/golang.org/x/net/lif/sys_solaris_amd64.s lxd-2.9.2/dist/src/golang.org/x/net/lif/sys_solaris_amd64.s --- lxd-2.9.1/dist/src/golang.org/x/net/lif/sys_solaris_amd64.s 2017-02-16 17:27:53.000000000 +0000 +++ lxd-2.9.2/dist/src/golang.org/x/net/lif/sys_solaris_amd64.s 2017-02-21 04:43:16.000000000 +0000 @@ -6,6 +6,3 @@ TEXT ·sysvicall6(SB),NOSPLIT,$0-88 JMP syscall·sysvicall6(SB) - -TEXT ·keepAlive(SB),NOSPLIT,$0 - RET diff -Nru lxd-2.9.1/dist/src/golang.org/x/net/route/binary.go lxd-2.9.2/dist/src/golang.org/x/net/route/binary.go --- lxd-2.9.1/dist/src/golang.org/x/net/route/binary.go 2017-02-16 17:27:53.000000000 +0000 +++ lxd-2.9.2/dist/src/golang.org/x/net/route/binary.go 2017-02-21 04:43:16.000000000 +0000 @@ -9,7 +9,7 @@ // This file contains duplicates of encoding/binary package. // // This package is supposed to be used by the net package of standard -// library. Therefore a package set used in the package must be the +// library. Therefore the package set used in the package must be the // same as net package. var ( diff -Nru lxd-2.9.1/dist/src/golang.org/x/net/route/syscall.go lxd-2.9.2/dist/src/golang.org/x/net/route/syscall.go --- lxd-2.9.1/dist/src/golang.org/x/net/route/syscall.go 2017-02-16 17:27:53.000000000 +0000 +++ lxd-2.9.2/dist/src/golang.org/x/net/route/syscall.go 2017-02-21 04:43:16.000000000 +0000 @@ -11,10 +11,6 @@ "unsafe" ) -// TODO: replace with runtime.KeepAlive when available -//go:noescape -func keepAlive(p unsafe.Pointer) - var zero uintptr func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { @@ -25,7 +21,6 @@ p = unsafe.Pointer(&zero) } _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - keepAlive(p) if errno != 0 { return error(errno) } diff -Nru lxd-2.9.1/dist/src/golang.org/x/net/route/syscall.s lxd-2.9.2/dist/src/golang.org/x/net/route/syscall.s --- lxd-2.9.1/dist/src/golang.org/x/net/route/syscall.s 2017-02-16 17:27:53.000000000 +0000 +++ lxd-2.9.2/dist/src/golang.org/x/net/route/syscall.s 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -#include "textflag.h" - -TEXT ·keepAlive(SB),NOSPLIT,$0 - RET diff -Nru lxd-2.9.1/lxd/container.go lxd-2.9.2/lxd/container.go --- lxd-2.9.1/lxd/container.go 2017-02-16 17:27:01.000000000 +0000 +++ lxd-2.9.2/lxd/container.go 2017-02-21 04:42:34.000000000 +0000 @@ -221,12 +221,31 @@ return nil } +func isRootDiskDevice(device types.Device) bool { + if device["type"] == "disk" && device["path"] == "/" && device["source"] == "" { + return true + } + + return false +} + +func containerGetRootDiskDevice(devices types.Devices) (string, types.Device) { + for devName, dev := range devices { + if isRootDiskDevice(dev) { + return devName, dev + } + } + + return "", types.Device{} +} + func containerValidDevices(devices types.Devices, profile bool, expanded bool) error { // Empty device list if devices == nil { return nil } + var diskDevicePaths []string // Check each device individually for name, m := range devices { if m["type"] == "" { @@ -256,6 +275,12 @@ return fmt.Errorf("Missing parent for %s type nic.", m["nictype"]) } } else if m["type"] == "disk" { + if !expanded && !shared.StringInSlice(m["path"], diskDevicePaths) { + diskDevicePaths = append(diskDevicePaths, m["path"]) + } else if !expanded { + return fmt.Errorf("More than one disk device uses the same path: %s.", m["path"]) + } + if m["path"] == "" { return fmt.Errorf("Disk entry is missing the required \"path\" property.") } @@ -305,14 +330,8 @@ // Checks on the expanded config if expanded { - foundRootfs := false - for _, m := range devices { - if m["type"] == "disk" && m["path"] == "/" { - foundRootfs = true - } - } - - if !foundRootfs { + k, _ := containerGetRootDiskDevice(devices) + if k == "" { return fmt.Errorf("Container is lacking rootfs entry") } } @@ -427,6 +446,8 @@ StoragePool() string // FIXME: Those should be internal functions + // Needed for migration for now. + GetStoragePoolFromDevices() (string, error) StorageStart() error StorageStop() error Storage() storage @@ -660,6 +681,37 @@ } } + // Check that there are no contradicting root disk devices. + var profileRootDiskDevices []string + for _, pName := range args.Profiles { + _, p, err := dbProfileGet(d.db, pName) + if err != nil { + return nil, fmt.Errorf("Could not load profile '%s'.", pName) + } + + k, v := containerGetRootDiskDevice(p.Devices) + if k != "" && v["pool"] == "" { + return nil, fmt.Errorf("A root disk device must have the \"pool\" property set.") + } else if k != "" && !shared.StringInSlice(k, profileRootDiskDevices) { + profileRootDiskDevices = append(profileRootDiskDevices, k) + } + } + + k, newLocalRootDiskDevice := containerGetRootDiskDevice(args.Devices) + // Check whether container has a local root device with a "pool" + // property set. + if k != "" && newLocalRootDiskDevice["pool"] == "" { + return nil, fmt.Errorf("A root disk device must have the \"pool\" property set.") + } else if k == "" { + // Check whether the container's profiles provide a unique root + // device. 
+ if len(profileRootDiskDevices) == 0 { + return nil, fmt.Errorf("Container relies on profile's root disk device but none was found") + } else if len(profileRootDiskDevices) > 1 { + return nil, fmt.Errorf("Container relies on profile's root disk device but conflicting devices were found") + } + } + + // Create the container entry id, err := dbContainerCreate(d.db, args) if err != nil { diff -Nru lxd-2.9.1/lxd/container_lxc.go lxd-2.9.2/lxd/container_lxc.go --- lxd-2.9.1/lxd/container_lxc.go 2017-02-16 17:27:01.000000000 +0000 +++ lxd-2.9.2/lxd/container_lxc.go 2017-02-21 04:42:34.000000000 +0000 @@ -210,9 +210,16 @@ return nil, err } - storagePool := c.StoragePool() + // Retrieve the storage pool we've been given from the container's + // devices. + storagePool, err := c.GetStoragePoolFromDevices() + if err != nil { + return nil, err + } + c.storagePool = storagePool + // Get the storage pool ID for the container. - poolID, pool, err := dbStoragePoolGet(d.db, storagePool) + poolID, pool, err := dbStoragePoolGet(d.db, c.storagePool) if err != nil { c.Delete() return nil, err @@ -220,7 +227,7 @@ // Validate the requested storage volume configuration. volumeConfig := map[string]string{} - err = storageVolumeValidateConfig(storagePool, volumeConfig, pool) + err = storageVolumeValidateConfig(c.storagePool, volumeConfig, pool) if err != nil { c.Delete() return nil, err @@ -346,7 +353,7 @@ stateful: args.Stateful, } - // Load the config + // Load the config. err := c.init() if err != nil { return nil, err @@ -692,6 +699,11 @@ } } + // Try to retrieve the container's storage pool from the storage volumes + // database but do not fail so that users can recover from storage + // breakage. + c.initStoragePool() + return nil } @@ -1369,8 +1381,41 @@ return nil } +// Initialize the storage pool for this container: the container's devices +// are inspected for a suitable storage pool. This function is called by +// containerLXCCreate() to detect a suitable pool. +func (c *containerLXC) GetStoragePoolFromDevices() (string, error) { + _, rootDiskDevice := containerGetRootDiskDevice(c.localDevices) + if rootDiskDevice["pool"] == "" { + _, rootDiskDevice = containerGetRootDiskDevice(c.expandedDevices) + } + if rootDiskDevice["pool"] == "" { + return "", fmt.Errorf("No storage pool found in the container's devices.") + } + + c.storagePool = rootDiskDevice["pool"] + return c.storagePool, nil +} + +// This function is called on all non-create operations where an entry for the +// container's storage volume will already exist in the database and so we can +// retrieve it. +func (c *containerLXC) initStoragePool() error { + if c.storagePool != "" { + return nil + } + + poolName, err := dbContainerPool(c.daemon.db, c.Name()) + if err != nil { + return err + } + c.storagePool = poolName + + return nil +} + // Initialize storage interface for this container. -func (c *containerLXC) initStorage() error { +func (c *containerLXC) initStorageInterface() error { if c.storage != nil { return nil } @@ -1426,9 +1471,6 @@ func (c *containerLXC) expandDevices() error { devices := types.Devices{} - rootDevices := 0 - profileStoragePool := "" - // Apply all the profiles for _, p := range c.profiles { profileDevices, err := dbDevices(c.daemon.db, p, true) if err != nil { return err } - - // Check all pools specified in the container's profiles.
for k, v := range profileDevices { devices[k] = v - - if v["type"] == "disk" && v["path"] == "/" { - rootDevices++ - profileStoragePool = v["pool"] - } - } } // Stick local devices on top for k, v := range c.localDevices { devices[k] = v - - if v["type"] == "disk" && v["path"] == "/" { - c.storagePool = v["pool"] - } - } - - if rootDevices > 1 { - return fmt.Errorf("Failed to detect unique root device: Multiple root devices detected.") - } - - if c.storagePool == "" { - if profileStoragePool == "" { - return fmt.Errorf("No storage pool specified.") - } - c.storagePool = profileStoragePool } c.expandedDevices = devices - return nil } @@ -2562,7 +2581,7 @@ var ctxMap log.Ctx // Initialize storage interface for the container. - err := c.initStorage() + err := c.initStorageInterface() if err != nil { return err } @@ -2690,7 +2709,7 @@ shared.LogInfo("Deleting container", ctxMap) // Initialize storage interface for the container. - err := c.initStorage() + err := c.initStorageInterface() if err != nil { return err } @@ -2765,7 +2784,7 @@ shared.LogInfo("Renaming container", ctxMap) // Initialize storage interface for the container. - err := c.initStorage() + err := c.initStorageInterface() if err != nil { return err } @@ -3180,7 +3199,7 @@ } // Initialize storage interface for the container. - err = c.initStorage() + err = c.initStorageInterface() if err != nil { return err } @@ -3224,57 +3243,83 @@ c.localConfig["volatile.idmap.base"] = fmt.Sprintf("%v", base) } - // Apply disk quota changes - for _, m := range addDevices { - var oldRootfsSize string - for _, m := range oldExpandedDevices { - if m["type"] == "disk" && m["path"] == "/" { - oldRootfsSize = m["size"] - break - } + // Retrieve old root disk devices. + oldLocalRootDiskDeviceKey, oldLocalRootDiskDevice := containerGetRootDiskDevice(oldLocalDevices) + var oldProfileRootDiskDevices []string + for k, v := range oldExpandedDevices { + if isRootDiskDevice(v) && k != oldLocalRootDiskDeviceKey && !shared.StringInSlice(k, oldProfileRootDiskDevices) { + oldProfileRootDiskDevices = append(oldProfileRootDiskDevices, k) } + } - if m["size"] != oldRootfsSize { - size, err := shared.ParseByteSizeString(m["size"]) - if err != nil { - return err - } - - err = c.storage.ContainerSetQuota(c, size) - if err != nil { - return err - } + // Retrieve new root disk devices. + newLocalRootDiskDeviceKey, newLocalRootDiskDevice := containerGetRootDiskDevice(c.localDevices) + var newProfileRootDiskDevices []string + for k, v := range c.expandedDevices { + if isRootDiskDevice(v) && k != newLocalRootDiskDeviceKey && !shared.StringInSlice(k, newProfileRootDiskDevices) { + newProfileRootDiskDevices = append(newProfileRootDiskDevices, k) } } - // Confirm that the storage pool didn't change. - var oldRootfs types.Device - for _, m := range oldExpandedDevices { - if m["type"] == "disk" && m["path"] == "/" { - oldRootfs = m - break + // Verify root disk devices. (Be specific with error messages.) 
+ var oldRootDiskDeviceKey string + var newRootDiskDeviceKey string + if oldLocalRootDiskDevice["pool"] != "" { + oldRootDiskDeviceKey = oldLocalRootDiskDeviceKey + newRootDiskDeviceKey = newLocalRootDiskDeviceKey + + if newLocalRootDiskDevice["pool"] == "" { + if len(newProfileRootDiskDevices) == 0 { + return fmt.Errorf("Update will cause the container to rely on a profile's root disk device but none was found.") + } else if len(newProfileRootDiskDevices) > 1 { + return fmt.Errorf("Update will cause the container to rely on a profile's root disk device but conflicting devices were found.") + } else if c.expandedDevices[newProfileRootDiskDevices[0]]["pool"] != oldLocalRootDiskDevice["pool"] { + newRootDiskDeviceKey = newProfileRootDiskDevices[0] + return fmt.Errorf("Using the profile's root disk device would change the storage pool of the container.") + } + } + } else { + // This branch should allow us to cover cases where a container + // didn't have root disk device before for whatever reason. As + // long as there is a root disk device in one of the local or + // profile devices we're good. + if newLocalRootDiskDevice["pool"] != "" { + newRootDiskDeviceKey = newLocalRootDiskDeviceKey + + if len(oldProfileRootDiskDevices) > 0 { + oldRootDiskDeviceKey = oldProfileRootDiskDevices[0] + if oldExpandedDevices[oldRootDiskDeviceKey]["pool"] != newLocalRootDiskDevice["pool"] { + return fmt.Errorf("The new local root disk device would change the storage pool of the container.") + } + } + } else { + if len(newProfileRootDiskDevices) == 0 { + return fmt.Errorf("Update will cause the container to rely on a profile's root disk device but none was found.") + } else if len(newProfileRootDiskDevices) > 1 { + return fmt.Errorf("Using the profile's root disk device would change the storage pool of the container.") + } + newRootDiskDeviceKey = newProfileRootDiskDevices[0] } } - var newRootfs types.Device - for _, name := range c.expandedDevices.DeviceNames() { - m := c.expandedDevices[name] - if m["type"] == "disk" && m["path"] == "/" { - newRootfs = m - break + oldRootDiskDeviceSize := oldExpandedDevices[oldRootDiskDeviceKey]["size"] + newRootDiskDeviceSize := c.expandedDevices[newRootDiskDeviceKey]["size"] + + // Apply disk quota changes + if newRootDiskDeviceSize != oldRootDiskDeviceSize { + size, err := shared.ParseByteSizeString(newRootDiskDeviceSize) + if err != nil { + return err } - } - if oldRootfs["pool"] != "" && (oldRootfs["pool"] != newRootfs["pool"]) { - return fmt.Errorf("Changing the storage pool of a container is not yet implemented.") + err = c.storage.ContainerSetQuota(c, size) + if err != nil { + return err + } } // Apply the live changes if c.IsRunning() { - if oldRootfs["source"] != newRootfs["source"] { - return fmt.Errorf("Cannot change the rootfs path of a running container") - } - // Live update the container config for _, key := range changedConfig { value := c.expandedConfig[key] @@ -4039,7 +4084,7 @@ shared.LogInfo("Migrating container", ctxMap) // Initialize storage interface for the container. - err = c.initStorage() + err = c.initStorageInterface() if err != nil { return err } @@ -4752,7 +4797,7 @@ disk := map[string]api.ContainerStateDisk{} // Initialize storage interface for the container. - err := c.initStorage() + err := c.initStorageInterface() if err != nil { return disk } @@ -4990,7 +5035,7 @@ func (c *containerLXC) StorageStart() error { // Initialize storage interface for the container. 
- err := c.initStorage() + err := c.initStorageInterface() if err != nil { return err } @@ -5008,7 +5053,7 @@ func (c *containerLXC) StorageStop() error { // Initialize storage interface for the container. - err := c.initStorage() + err := c.initStorageInterface() if err != nil { return err } diff -Nru lxd-2.9.1/lxd/containers_post.go lxd-2.9.2/lxd/containers_post.go --- lxd-2.9.1/lxd/containers_post.go 2017-02-16 17:27:01.000000000 +0000 +++ lxd-2.9.2/lxd/containers_post.go 2017-02-21 04:42:34.000000000 +0000 @@ -203,6 +203,83 @@ * point and just negotiate it over the migration control * socket. Anyway, it'll happen later :) */ + + localRootDiskDeviceKey, v := containerGetRootDiskDevice(req.Devices) + poolForCopyOrMove := "" + // Handle copying/moving between two storage-api LXD instances. + // FIXME(brauner): Atm, we do not let users target a specific pool to + // move to, i.e. the pool the container will receive on the remote. + // So when we receive an invalid/non-existing storage pool, we simply + // set it to "" and perform the same pool-searching algorithm as in + // the non-storage-api to storage-api LXD instance case seen below. + if localRootDiskDeviceKey != "" && v["pool"] != "" { + _, err := dbStoragePoolGetID(d.db, v["pool"]) + if err == NoSuchObjectError { + v["pool"] = "" + } + } + + // Handle copying or moving containers between a non-storage-api and a + // storage-api LXD instance. + if localRootDiskDeviceKey == "" || localRootDiskDeviceKey != "" && v["pool"] == "" { + // Request came without a root disk device or without the pool + // property set. Try to retrieve this information from a + // profile. (No need to check for conflicting storage pool + // properties in the profiles. This will be handled by + // containerCreateInternal().) + for _, pName := range req.Profiles { + _, p, err := dbProfileGet(d.db, pName) + if err != nil { + return InternalError(err) + } + + k, v := containerGetRootDiskDevice(p.Devices) + if k != "" && v["pool"] != "" { + poolForCopyOrMove = v["pool"] + break + } + } + if poolForCopyOrMove == "" { + pools, err := dbStoragePools(d.db) + if err != nil { + return InternalError(err) + } + + if len(pools) != 1 { + return InternalError(fmt.Errorf("No unique storage pool found.")) + } + + poolForCopyOrMove = pools[0] + } + } + + if localRootDiskDeviceKey == "" { + // Give the container its own local root disk device with a + // pool property. + rootDev := map[string]string{} + rootDev["type"] = "disk" + rootDev["path"] = "/" + rootDev["pool"] = poolForCopyOrMove + if args.Devices == nil { + args.Devices = map[string]map[string]string{} + } + + // Make sure that we do not overwrite a device the user + // is currently using under the name "root". + rootDevName := "root" + for i := 0; i < 100; i++ { + if args.Devices[rootDevName] == nil { + break + } + rootDevName = fmt.Sprintf("root%d", i) + continue + } + args.Devices[rootDevName] = rootDev + } else if args.Devices[localRootDiskDeviceKey]["pool"] == "" { + // Give the container's root disk device a pool property.
+ args.Devices[localRootDiskDeviceKey]["pool"] = poolForCopyOrMove + } + if err != nil { c, err = containerCreateAsEmpty(d, args) if err != nil { @@ -213,6 +290,11 @@ if err != nil { return InternalError(err) } + + _, err = cM.GetStoragePoolFromDevices() + if err != nil { + return InternalError(err) + } ps, err := storagePoolInit(d, cM.StoragePool()) if err != nil { diff -Nru lxd-2.9.1/lxd/main.go lxd-2.9.2/lxd/main.go --- lxd-2.9.1/lxd/main.go 2017-02-16 17:27:01.000000000 +0000 +++ lxd-2.9.2/lxd/main.go 2017-02-21 04:42:34.000000000 +0000 @@ -26,7 +26,7 @@ var argStorageBackend = gnuflag.String("storage-backend", "", "") var argStorageCreateDevice = gnuflag.String("storage-create-device", "", "") var argStorageCreateLoop = gnuflag.Int64("storage-create-loop", -1, "") -var argStoragePool = gnuflag.String("storage-pool", "", "") +var argStorageDataset = gnuflag.String("storage-pool", "", "") var argSyslog = gnuflag.Bool("syslog", false, "") var argTimeout = gnuflag.Int("timeout", -1, "") var argTrustPassword = gnuflag.String("trust-password", "", "") diff -Nru lxd-2.9.1/lxd/main_init.go lxd-2.9.2/lxd/main_init.go --- lxd-2.9.1/lxd/main_init.go 2017-02-16 17:27:01.000000000 +0000 +++ lxd-2.9.2/lxd/main_init.go 2017-02-21 04:42:34.000000000 +0000 @@ -23,6 +23,7 @@ var storageLoopSize int64 // Size in GB var storageDevice string // Path var storagePool string // pool name + var storageDataset string // existing ZFS pool name var networkAddress string // Address var networkPort int64 // Port var trustPassword string // Trust password @@ -192,7 +193,7 @@ } if *argStorageBackend == "dir" { - if *argStorageCreateLoop != -1 || *argStorageCreateDevice != "" || *argStoragePool != "" { + if *argStorageCreateLoop != -1 || *argStorageCreateDevice != "" || *argStorageDataset != "" { return fmt.Errorf("None of --storage-pool, --storage-create-device or --storage-create-loop may be used with the 'dir' backend.") } } @@ -201,10 +202,6 @@ if *argStorageCreateLoop != -1 && *argStorageCreateDevice != "" { return fmt.Errorf("Only one of --storage-create-device or --storage-create-loop can be specified with the 'zfs' backend.") } - - if *argStoragePool == "" { - return fmt.Errorf("--storage-pool must be specified with the 'zfs' backend.") - } } if *argNetworkAddress == "" { @@ -219,13 +216,14 @@ storageBackend = *argStorageBackend storageLoopSize = *argStorageCreateLoop storageDevice = *argStorageCreateDevice - storagePool = *argStoragePool + storageDataset = *argStorageDataset networkAddress = *argNetworkAddress networkPort = *argNetworkPort trustPassword = *argTrustPassword + storagePool = "default" storageSetup = true } else { - if *argStorageBackend != "" || *argStorageCreateDevice != "" || *argStorageCreateLoop != -1 || *argStoragePool != "" || *argNetworkAddress != "" || *argNetworkPort != -1 || *argTrustPassword != "" { + if *argStorageBackend != "" || *argStorageCreateDevice != "" || *argStorageCreateLoop != -1 || *argStorageDataset != "" || *argNetworkAddress != "" || *argNetworkPort != -1 || *argTrustPassword != "" { return fmt.Errorf("Init configuration is only valid with --auto") } @@ -249,6 +247,7 @@ } if storageSetup && storageBackend == "zfs" { + storageLoopSize = -1 if askBool("Create a new ZFS pool (yes/no) [default=yes]? ", "yes") { if askBool("Would you like to use an existing block device (yes/no) [default=no]? 
", "no") { deviceExists := func(path string) error { @@ -278,7 +277,7 @@ storageLoopSize = askInt(q, 1, -1, fmt.Sprintf("%d", def)) } } else { - storagePool = askString("Name of the existing ZFS pool or dataset: ", "", nil) + storageDataset = askString("Name of the existing ZFS pool or dataset: ", "", nil) } } @@ -360,14 +359,25 @@ } } - // Destroy any existing loop device - for _, file := range []string{"zfs.img"} { - os.Remove(shared.VarPath(file)) - } - + // Pool configuration storageConfig := map[string]string{} - storageConfig["source"] = storageDevice - if storageBackend != "dir" { + if storageDevice != "" { + storageConfig["source"] = storageDevice + // The user probably wants to give the storage pool a + // custom name. + if storageDataset != "" { + storagePool = storageDataset + } + } else if storageDataset != "" && storageBackend == "zfs" && storageLoopSize < 0 { + storageConfig["source"] = storageDataset + } + + if storageBackend != "dir" && storageLoopSize > 0 { + // The user probably wants to give the storage pool a + // custom name. + if storageDataset != "" { + storagePool = storageDataset + } storageConfig["size"] = strconv.FormatInt(storageLoopSize, 10) + "GB" } diff -Nru lxd-2.9.1/lxd/patches.go lxd-2.9.2/lxd/patches.go --- lxd-2.9.1/lxd/patches.go 2017-02-16 17:27:01.000000000 +0000 +++ lxd-2.9.2/lxd/patches.go 2017-02-21 04:42:34.000000000 +0000 @@ -6,6 +6,7 @@ "os/exec" "strconv" "strings" + "syscall" "github.com/lxc/lxd/shared" @@ -34,6 +35,8 @@ {name: "leftover_profile_config", run: patchLeftoverProfileConfig}, {name: "network_permissions", run: patchNetworkPermissions}, {name: "storage_api", run: patchStorageApi}, + {name: "storage_api_v1", run: patchStorageApiV1}, + {name: "storage_api_dir_cleanup", run: patchStorageApiDirCleanup}, } type patch struct { @@ -245,59 +248,10 @@ // appropriate device including a pool is added to the default profile // or the user explicitly passes the pool the container's storage volume // is supposed to be created on. - defaultID, defaultProfile, err := dbProfileGet(d.db, "default") - if err == nil { - foundRoot := false - for k, v := range defaultProfile.Devices { - if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" { - // Add the default storage pool. - defaultProfile.Devices[k]["pool"] = poolName - foundRoot = true - } - } - - if !foundRoot { - rootDev := map[string]string{} - rootDev["type"] = "disk" - rootDev["path"] = "/" - rootDev["pool"] = poolName - if defaultProfile.Devices == nil { - defaultProfile.Devices = map[string]map[string]string{} - } - defaultProfile.Devices["root"] = rootDev - } - - // This is nasty, but we need to clear the profiles config and - // devices in order to add the new root device including the - // newly added storage pool. - tx, err := dbBegin(d.db) - if err != nil { - return err - } - - err = dbProfileConfigClear(tx, defaultID) - if err != nil { - tx.Rollback() - return err - } - - err = dbProfileConfigAdd(tx, defaultID, defaultProfile.Config) - if err != nil { - tx.Rollback() - return err - } - - err = dbDevicesAdd(tx, "profile", defaultID, defaultProfile.Devices) - if err != nil { - tx.Rollback() - return err - } - - err = tx.Commit() - if err != nil { - tx.Rollback() - return err - } + allcontainers := append(cRegular, cSnapshots...) + err = updatePoolPropertyForAllObjects(d, poolName, allcontainers) + if err != nil { + return err } // Unset deprecated storage keys. 
@@ -318,18 +272,35 @@ poolSubvolumePath := getStoragePoolMountPoint(defaultPoolName) poolConfig["source"] = poolSubvolumePath - poolID, err := dbStoragePoolCreate(d.db, defaultPoolName, defaultStorageTypeName, poolConfig) - if err != nil { - return err - } + poolID := int64(-1) + _, err := dbStoragePools(d.db) + if err == nil { // Already exist valid storage pools. + // Get the pool ID as we need it for storage volume creation. + // (Use a tmp variable as Go's scoping is freaking me out.) + tmp, err := dbStoragePoolGetID(d.db, defaultPoolName) + if err != nil { + shared.LogErrorf("Failed to query database: %s.", err) + return err + } + poolID = tmp + } else if err == NoSuchObjectError { // Likely a pristine upgrade. + tmp, err := dbStoragePoolCreate(d.db, defaultPoolName, defaultStorageTypeName, poolConfig) + if err != nil { + return err + } + poolID = tmp - s, err := storagePoolInit(d, defaultPoolName) - if err != nil { - return err - } + s, err := storagePoolInit(d, defaultPoolName) + if err != nil { + return err + } - err = s.StoragePoolCreate() - if err != nil { + err = s.StoragePoolCreate() + if err != nil { + return err + } + } else { // Shouldn't happen. + shared.LogErrorf("Failed to query database: %s.", err) return err } @@ -339,19 +310,30 @@ if len(cRegular) > 0 { // ${LXD_DIR}/storage-pools/ containersSubvolumePath := getContainerMountPoint(defaultPoolName, "") - err := os.MkdirAll(containersSubvolumePath, 0711) - if err != nil { - return err + if !shared.PathExists(containersSubvolumePath) { + err := os.MkdirAll(containersSubvolumePath, 0711) + if err != nil { + return err + } } } for _, ct := range cRegular { // Create new db entry in the storage volumes table for the // container. - _, err := dbStoragePoolVolumeCreate(d.db, ct, storagePoolVolumeTypeContainer, poolID, volumeConfig) - if err != nil { - shared.LogWarnf("Could not insert a storage volume for container \"%s\".", ct) - continue + _, err := dbStoragePoolVolumeGetTypeID(d.db, ct, storagePoolVolumeTypeContainer, poolID) + if err == nil { + shared.LogWarnf("Storage volumes database already contains an entry for the container.") + } else if err == NoSuchObjectError { + // Insert storage volumes for containers into the database. + _, err := dbStoragePoolVolumeCreate(d.db, ct, storagePoolVolumeTypeContainer, poolID, volumeConfig) + if err != nil { + shared.LogErrorf("Could not insert a storage volume for container \"%s\".", ct) + return err + } + } else { + shared.LogErrorf("Failed to query database: %s", err) + return err } // Rename the btrfs subvolume and making it a @@ -359,9 +341,11 @@ // mv ${LXD_DIR}/containers/ ${LXD_DIR}/storage-pools// oldContainerMntPoint := shared.VarPath("containers", ct) newContainerMntPoint := getContainerMountPoint(defaultPoolName, ct) - err = os.Rename(oldContainerMntPoint, newContainerMntPoint) - if err != nil { - return err + if shared.PathExists(oldContainerMntPoint) { + err = os.Rename(oldContainerMntPoint, newContainerMntPoint) + if err != nil { + return err + } } // Create a symlink to the mountpoint of the container: @@ -384,9 +368,11 @@ // the new storage pool: // ${LXD_DIR}/storage-pools//snapshots newSnapshotsMntPoint := getSnapshotMountPoint(defaultPoolName, ct) - err = os.MkdirAll(newSnapshotsMntPoint, 0700) - if err != nil { - return err + if !shared.PathExists(newSnapshotsMntPoint) { + err := os.MkdirAll(newSnapshotsMntPoint, 0700) + if err != nil { + return err + } } } @@ -394,10 +380,22 @@ // Insert storage volumes for snapshots into the // database. 
Note that snapshots have already been moved // and symlinked above. So no need to do any work here. - _, err := dbStoragePoolVolumeCreate(d.db, cs, storagePoolVolumeTypeContainer, poolID, volumeConfig) - if err != nil { - shared.LogWarnf("Could not insert a storage volume for snapshot \"%s\".", cs) + _, err := dbStoragePoolVolumeGetTypeID(d.db, cs, storagePoolVolumeTypeContainer, poolID) + if err == nil { + shared.LogWarnf("Storage volumes database already contains an entry for the container.") + // For btrfs we need to assume that the btrfs + // execs below succeeded. continue + } else if err == NoSuchObjectError { + // Insert storage volumes for containers into the database. + _, err := dbStoragePoolVolumeCreate(d.db, cs, storagePoolVolumeTypeContainer, poolID, volumeConfig) + if err != nil { + shared.LogErrorf("Could not insert a storage volume for snapshot \"%s\".", cs) + return err + } + } else { + shared.LogErrorf("Failed to query database: %s", err) + return err } // We need to create a new snapshot since we can't move @@ -434,16 +432,13 @@ // ${LXD_DIR}/snapshots/ -> ${LXD_DIR}/storage-pools//snapshots/ snapshotsPath := shared.VarPath("snapshots", ct) newSnapshotMntPoint := getSnapshotMountPoint(defaultPoolName, ct) - if shared.PathExists(snapshotsPath) { - err := os.Remove(snapshotsPath) + os.Remove(snapshotsPath) + if !shared.PathExists(snapshotsPath) { + err := os.Symlink(newSnapshotMntPoint, snapshotsPath) if err != nil { return err } } - err = os.Symlink(newSnapshotMntPoint, snapshotsPath) - if err != nil { - return err - } } } @@ -452,23 +447,36 @@ // move. The tarballs remain in their original location. images := append(imgPublic, imgPrivate...) for _, img := range images { - _, err := dbStoragePoolVolumeCreate(d.db, img, storagePoolVolumeTypeImage, poolID, volumeConfig) - if err != nil { - shared.LogWarnf("Could not insert a storage volume for image \"%s\".", img) - continue + _, err := dbStoragePoolVolumeGetTypeID(d.db, img, storagePoolVolumeTypeImage, poolID) + if err == nil { + shared.LogWarnf("Storage volumes database already contains an entry for the container.") + } else if err == NoSuchObjectError { + // Insert storage volumes for containers into the database. + _, err := dbStoragePoolVolumeCreate(d.db, img, storagePoolVolumeTypeImage, poolID, volumeConfig) + if err != nil { + shared.LogWarnf("Could not insert a storage volume for image \"%s\".", img) + return err + } + } else { + shared.LogErrorf("Failed to query database: %s", err) + return err } imagesMntPoint := getImageMountPoint(defaultPoolName, "") - err = os.MkdirAll(imagesMntPoint, 0700) - if err != nil { - return err + if !shared.PathExists(imagesMntPoint) { + err := os.MkdirAll(imagesMntPoint, 0700) + if err != nil { + return err + } } oldImageMntPoint := shared.VarPath("images", img+".btrfs") newImageMntPoint := getImageMountPoint(defaultPoolName, img) - err = os.Rename(oldImageMntPoint, newImageMntPoint) - if err != nil { - return err + if shared.PathExists(oldImageMntPoint) { + err := os.Rename(oldImageMntPoint, newImageMntPoint) + if err != nil { + return err + } } } @@ -479,18 +487,35 @@ poolConfig := map[string]string{} poolConfig["source"] = shared.VarPath("storage-pools", defaultPoolName) - poolID, err := dbStoragePoolCreate(d.db, defaultPoolName, defaultStorageTypeName, poolConfig) - if err != nil { - return err - } + poolID := int64(-1) + _, err := dbStoragePools(d.db) + if err == nil { // Already exist valid storage pools. + // Get the pool ID as we need it for storage volume creation. 
+ // (Use a tmp variable as Go's scoping is freaking me out.) + tmp, err := dbStoragePoolGetID(d.db, defaultPoolName) + if err != nil { + shared.LogErrorf("Failed to query database: %s.", err) + return err + } + poolID = tmp + } else if err == NoSuchObjectError { // Likely a pristine upgrade. + tmp, err := dbStoragePoolCreate(d.db, defaultPoolName, defaultStorageTypeName, poolConfig) + if err != nil { + return err + } + poolID = tmp - s, err := storagePoolInit(d, defaultPoolName) - if err != nil { - return err - } + s, err := storagePoolInit(d, defaultPoolName) + if err != nil { + return err + } - err = s.StoragePoolCreate() - if err != nil { + err = s.StoragePoolCreate() + if err != nil { + return err + } + } else { // Shouldn't happen. + shared.LogErrorf("Failed to query database: %s.", err) return err } @@ -498,26 +523,39 @@ volumeConfig := map[string]string{} // Insert storage volumes for containers into the database. for _, ct := range cRegular { - _, err := dbStoragePoolVolumeCreate(d.db, ct, storagePoolVolumeTypeContainer, poolID, volumeConfig) - if err != nil { - shared.LogWarnf("Could not insert a storage volume for container \"%s\".", ct) - continue + _, err := dbStoragePoolVolumeGetTypeID(d.db, ct, storagePoolVolumeTypeContainer, poolID) + if err == nil { + shared.LogWarnf("Storage volumes database already contains an entry for the container.") + } else if err == NoSuchObjectError { + // Insert storage volumes for containers into the database. + _, err := dbStoragePoolVolumeCreate(d.db, ct, storagePoolVolumeTypeContainer, poolID, volumeConfig) + if err != nil { + shared.LogErrorf("Could not insert a storage volume for container \"%s\".", ct) + return err + } + } else { + shared.LogErrorf("Failed to query database: %s", err) + return err } // Create the new path where containers will be located on the // new storage api. containersMntPoint := getContainerMountPoint(defaultPoolName, "") - err = os.MkdirAll(containersMntPoint, 0711) - if err != nil { - return err + if !shared.PathExists(containersMntPoint) { + err := os.MkdirAll(containersMntPoint, 0711) + if err != nil { + return err + } } // Simply rename the container when they are directories. oldContainerMntPoint := shared.VarPath("containers", ct) newContainerMntPoint := getContainerMountPoint(defaultPoolName, ct) - err = os.Rename(oldContainerMntPoint, newContainerMntPoint) - if err != nil { - return err + if shared.PathExists(oldContainerMntPoint) { + err := os.Rename(oldContainerMntPoint, newContainerMntPoint) + if err != nil { + return err + } } doesntMatter := false @@ -543,16 +581,20 @@ // Create the new path where snapshots will be located on the // new storage api. snapshotsMntPoint := shared.VarPath("storage-pools", defaultPoolName, "snapshots") - err = os.MkdirAll(snapshotsMntPoint, 0711) - if err != nil { - return err + if !shared.PathExists(snapshotsMntPoint) { + err := os.MkdirAll(snapshotsMntPoint, 0711) + if err != nil { + return err + } } // Now simply rename the snapshots directory as well. newSnapshotMntPoint := getSnapshotMountPoint(defaultPoolName, ct) - err = os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint) - if err != nil { - return err + if shared.PathExists(oldSnapshotMntPoint) { + err := os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint) + if err != nil { + return err + } } // Create a symlink for this container. snapshots. @@ -566,10 +608,22 @@ // snapshots have already been moved and symlinked above. So no need to // do any work here. 
for _, cs := range cSnapshots { - _, err := dbStoragePoolVolumeCreate(d.db, cs, storagePoolVolumeTypeContainer, poolID, volumeConfig) - if err != nil { - shared.LogWarnf("Could not insert a storage volume for snapshot \"%s\".", cs) - continue + // Insert storage volumes for snapshots into the + // database. Note that snapshots have already been moved + // and symlinked above. So no need to do any work here. + _, err := dbStoragePoolVolumeGetTypeID(d.db, cs, storagePoolVolumeTypeContainer, poolID) + if err == nil { + shared.LogWarnf("Storage volumes database already contains an entry for the container.") + } else if err == NoSuchObjectError { + // Insert storage volumes for containers into the database. + _, err := dbStoragePoolVolumeCreate(d.db, cs, storagePoolVolumeTypeContainer, poolID, volumeConfig) + if err != nil { + shared.LogErrorf("Could not insert a storage volume for snapshot \"%s\".", cs) + return err + } + } else { + shared.LogErrorf("Failed to query database: %s", err) + return err } } @@ -577,10 +631,19 @@ // move. The tarballs remain in their original location. images := append(imgPublic, imgPrivate...) for _, img := range images { - _, err := dbStoragePoolVolumeCreate(d.db, img, storagePoolVolumeTypeImage, poolID, volumeConfig) - if err != nil { - shared.LogWarnf("Could not insert a storage volume for image \"%s\".", img) - continue + _, err := dbStoragePoolVolumeGetTypeID(d.db, img, storagePoolVolumeTypeImage, poolID) + if err == nil { + shared.LogWarnf("Storage volumes database already contains an entry for the container.") + } else if err == NoSuchObjectError { + // Insert storage volumes for containers into the database. + _, err := dbStoragePoolVolumeCreate(d.db, img, storagePoolVolumeTypeImage, poolID, volumeConfig) + if err != nil { + shared.LogWarnf("Could not insert a storage volume for image \"%s\".", img) + return err + } + } else { + shared.LogErrorf("Failed to query database: %s", err) + return err } } @@ -613,15 +676,42 @@ return err } - poolID, err := dbStoragePoolCreate(d.db, defaultPoolName, defaultStorageTypeName, poolConfig) - if err != nil { + // Peek into the storage pool database to see whether any storage pools + // are already configured. If so, we can assume that a partial upgrade + // has been performed and can skip the next steps. + poolID := int64(-1) + pools, err := dbStoragePools(d.db) + if err == nil { // Already exist valid storage pools. + // Check if the storage pool already has a db entry. + if shared.StringInSlice(defaultPoolName, pools) { + shared.LogWarnf("Database already contains a valid entry for the storage pool: %s.", defaultPoolName) + } + + // Get the pool ID as we need it for storage volume creation. + // (Use a tmp variable as Go's scoping is freaking me out.) + tmp, err := dbStoragePoolGetID(d.db, defaultPoolName) + if err != nil { + shared.LogErrorf("Failed to query database: %s.", err) + return err + } + poolID = tmp + } else if err == NoSuchObjectError { // Likely a pristine upgrade. + tmp, err := dbStoragePoolCreate(d.db, defaultPoolName, defaultStorageTypeName, poolConfig) + if err != nil { + return err + } + poolID = tmp + } else { // Shouldn't happen. 
+ shared.LogErrorf("Failed to query database: %s.", err) return err } poolMntPoint := getStoragePoolMountPoint(defaultPoolName) - err = os.MkdirAll(poolMntPoint, 0711) - if err != nil { - return err + if !shared.PathExists(poolMntPoint) { + err = os.MkdirAll(poolMntPoint, 0711) + if err != nil { + shared.LogWarnf("Failed to create pool mountpoint: %s", poolMntPoint) + } } // Create storage volumes in the database. @@ -629,18 +719,29 @@ if len(cRegular) > 0 { newContainersMntPoint := getContainerMountPoint(defaultPoolName, "") - err = os.MkdirAll(newContainersMntPoint, 0711) - if err != nil { - return err + if !shared.PathExists(newContainersMntPoint) { + err = os.MkdirAll(newContainersMntPoint, 0711) + if err != nil { + shared.LogWarnf("Failed to create containers mountpoint: %s", newContainersMntPoint) + } } } // Insert storage volumes for containers into the database. for _, ct := range cRegular { - _, err := dbStoragePoolVolumeCreate(d.db, ct, storagePoolVolumeTypeContainer, poolID, volumeConfig) - if err != nil { - shared.LogWarnf("Could not insert a storage volume for container \"%s\".", ct) - continue + _, err := dbStoragePoolVolumeGetTypeID(d.db, ct, storagePoolVolumeTypeContainer, poolID) + if err == nil { + shared.LogWarnf("Storage volumes database already contains an entry for the container.") + } else if err == NoSuchObjectError { + // Insert storage volumes for containers into the database. + _, err := dbStoragePoolVolumeCreate(d.db, ct, storagePoolVolumeTypeContainer, poolID, volumeConfig) + if err != nil { + shared.LogErrorf("Could not insert a storage volume for container \"%s\".", ct) + return err + } + } else { + shared.LogErrorf("Failed to query database: %s", err) + return err } // Unmount the logical volume. @@ -656,9 +757,11 @@ // new storage api. We do os.Rename() here to preserve // permissions and ownership. newContainerMntPoint := getContainerMountPoint(defaultPoolName, ct) - err = os.Rename(oldContainerMntPoint, newContainerMntPoint) - if err != nil { - return err + if !shared.PathExists(newContainerMntPoint) { + err = os.Rename(oldContainerMntPoint, newContainerMntPoint) + if err != nil { + return err + } } if shared.PathExists(oldContainerMntPoint + ".lv") { @@ -698,20 +801,33 @@ } for _, cs := range ctSnapshots { - // Insert storage volumes for snapshots. - _, err := dbStoragePoolVolumeCreate(d.db, cs, storagePoolVolumeTypeContainer, poolID, volumeConfig) - if err != nil { - shared.LogWarnf("Could not insert a storage volume for snapshot \"%s\".", cs) - continue + // Insert storage volumes for snapshots into the + // database. Note that snapshots have already been moved + // and symlinked above. So no need to do any work here. + _, err := dbStoragePoolVolumeGetTypeID(d.db, cs, storagePoolVolumeTypeContainer, poolID) + if err == nil { + shared.LogWarnf("Storage volumes database already contains an entry for the container.") + } else if err == NoSuchObjectError { + // Insert storage volumes for containers into the database. 
+ _, err := dbStoragePoolVolumeCreate(d.db, cs, storagePoolVolumeTypeContainer, poolID, volumeConfig) + if err != nil { + shared.LogErrorf("Could not insert a storage volume for snapshot \"%s\".", cs) + return err + } + } else { + shared.LogErrorf("Failed to query database: %s", err) + return err } // Create the snapshots directory in the new storage // pool: // ${LXD_DIR}/storage-pools//snapshots newSnapshotMntPoint := getSnapshotMountPoint(defaultPoolName, cs) - err = os.MkdirAll(newSnapshotMntPoint, 0700) - if err != nil { - return err + if !shared.PathExists(newSnapshotMntPoint) { + err := os.MkdirAll(newSnapshotMntPoint, 0700) + if err != nil { + return err + } } // Unmount the logical volume. @@ -725,15 +841,14 @@ // Rename the snapshot mountpoint to preserve acl's and // so on. - err = os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint) - if err != nil { - return err + if shared.PathExists(oldSnapshotMntPoint) { + err := os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint) + if err != nil { + return err + } } - err = os.Remove(oldSnapshotMntPoint + ".lv") - if err != nil { - return err - } + os.Remove(oldSnapshotMntPoint + ".lv") // Make sure we use a valid lv name. csLvName := containerNameToLVName(cs) @@ -758,9 +873,11 @@ return err } } - err = os.Symlink(newSnapshotsPath, snapshotsPath) - if err != nil { - return err + if !shared.PathExists(snapshotsPath) { + err = os.Symlink(newSnapshotsPath, snapshotsPath) + if err != nil { + return err + } } } @@ -776,10 +893,19 @@ } for _, img := range images { - _, err := dbStoragePoolVolumeCreate(d.db, img, storagePoolVolumeTypeImage, poolID, volumeConfig) - if err != nil { - shared.LogWarnf("Could not insert a storage volume for image \"%s\".", img) - continue + _, err := dbStoragePoolVolumeGetTypeID(d.db, img, storagePoolVolumeTypeImage, poolID) + if err == nil { + shared.LogWarnf("Storage volumes database already contains an entry for the container.") + } else if err == NoSuchObjectError { + // Insert storage volumes for containers into the database. + _, err := dbStoragePoolVolumeCreate(d.db, img, storagePoolVolumeTypeImage, poolID, volumeConfig) + if err != nil { + shared.LogWarnf("Could not insert a storage volume for image \"%s\".", img) + return err + } + } else { + shared.LogErrorf("Failed to query database: %s", err) + return err } // Unmount the logical volume. @@ -799,9 +925,11 @@ } newImageMntPoint := getImageMountPoint(defaultPoolName, img) - err = os.MkdirAll(newImageMntPoint, 0700) - if err != nil { - return err + if !shared.PathExists(newImageMntPoint) { + err := os.MkdirAll(newImageMntPoint, 0700) + if err != nil { + return err + } } // Rename the logical volume device. @@ -819,33 +947,66 @@ poolConfig := map[string]string{} oldLoopFilePath := shared.VarPath("zfs.img") poolName := defaultPoolName - if shared.PathExists(oldLoopFilePath) { - // This is a loop file pool. - poolConfig["source"] = shared.VarPath("disks", defaultPoolName+".img") - err := os.Rename(oldLoopFilePath, poolConfig["source"]) - if err != nil { - return err - } - } else { + + // Peek into the storage pool database to see whether any storage pools + // are already configured. If so, we can assume that a partial upgrade + // has been performed and can skip the next steps. Otherwise we might + // run into problems. For example, the "zfs.img" file might have already + // been moved into ${LXD_DIR}/disks and we might therefore falsely + // conclude that we're using an existing storage pool. 
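The comment above spells out the triage that every backend patch now performs: a successful pool query means a partial upgrade already created the pool, NoSuchObjectError means a pristine upgrade, and any other error is a real database failure. Factored out, the logic might look like the sketch below (ensureDefaultPool is a hypothetical helper; dbStoragePools, dbStoragePoolGetID, dbStoragePoolCreate and NoSuchObjectError are the symbols used in this patch):

// ensureDefaultPool returns the ID of poolName, creating the database
// entry only on a pristine upgrade. Any other query error is fatal.
func ensureDefaultPool(d *Daemon, poolName string, driver string, config map[string]string) (int64, error) {
	_, err := dbStoragePools(d.db)
	if err == nil {
		// Partial upgrade: the pool should already have a database entry.
		return dbStoragePoolGetID(d.db, poolName)
	} else if err == NoSuchObjectError {
		// Pristine upgrade: create the entry from scratch.
		return dbStoragePoolCreate(d.db, poolName, driver, config)
	}
	return -1, err
}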
+ poolID := int64(-1) + pools, err := dbStoragePools(d.db) + if err == nil { // Already exist valid storage pools. if strings.Contains(defaultPoolName, "/") { poolName = "default" } - // This is a block device pool. - poolConfig["source"] = defaultPoolName - } - if poolName == defaultPoolName { - output, err := exec.Command("zpool", "get", "size", "-p", "-H", defaultPoolName).CombinedOutput() + // Check if the storage pool already has a db entry. + if shared.StringInSlice(poolName, pools) { + shared.LogWarnf("Database already contains a valid entry for the storage pool: %s.", poolName) + } + + // Get the pool ID as we need it for storage volume creation. + // (Use a tmp variable as Go's scoping is freaking me out.) + tmp, err := dbStoragePoolGetID(d.db, poolName) if err != nil { - return fmt.Errorf("Failed to set ZFS config: %s", output) + shared.LogErrorf("Failed to query database: %s.", err) + return err + } + poolID = tmp + } else if err == NoSuchObjectError { // Likely a pristine upgrade. + if shared.PathExists(oldLoopFilePath) { + // This is a loop file pool. + poolConfig["source"] = shared.VarPath("disks", defaultPoolName+".img") + err := os.Rename(oldLoopFilePath, poolConfig["source"]) + if err != nil { + return err + } + } else { + if strings.Contains(defaultPoolName, "/") { + poolName = "default" + } + // This is a block device pool. + poolConfig["source"] = defaultPoolName } - lidx := strings.LastIndex(string(output), "\t") - fidx := strings.LastIndex(string(output)[:lidx-1], "\t") - poolConfig["size"] = string(output)[fidx+1 : lidx] - } - poolID, err := dbStoragePoolCreate(d.db, poolName, defaultStorageTypeName, poolConfig) - if err != nil { + if poolName == defaultPoolName { + output, err := exec.Command("zpool", "get", "size", "-p", "-H", defaultPoolName).CombinedOutput() + if err == nil { + lidx := strings.LastIndex(string(output), "\t") + fidx := strings.LastIndex(string(output)[:lidx-1], "\t") + poolConfig["size"] = string(output)[fidx+1 : lidx] + } + } + + // (Use a tmp variable as Go's scoping is freaking me out.) + tmp, err := dbStoragePoolCreate(d.db, poolName, defaultStorageTypeName, poolConfig) + if err != nil { + shared.LogWarnf("Storage pool already exists in the database. Proceeding...") + } + poolID = tmp + } else { // Shouldn't happen. + shared.LogErrorf("Failed to query database: %s.", err) return err } @@ -854,19 +1015,29 @@ if len(cRegular) > 0 { containersSubvolumePath := getContainerMountPoint(poolName, "") - err := os.MkdirAll(containersSubvolumePath, 0711) - if err != nil { - return err + if !shared.PathExists(containersSubvolumePath) { + err := os.MkdirAll(containersSubvolumePath, 0711) + if err != nil { + shared.LogWarnf("Failed to create path: %s.", containersSubvolumePath) + } } } + failedUpgradeEntities := []string{} for _, ct := range cRegular { - - // Insert storage volumes for containers into the database. - _, err := dbStoragePoolVolumeCreate(d.db, ct, storagePoolVolumeTypeContainer, poolID, volumeConfig) - if err != nil { - shared.LogWarnf("Could not insert a storage volume for container \"%s\".", ct) - continue + _, err := dbStoragePoolVolumeGetTypeID(d.db, ct, storagePoolVolumeTypeContainer, poolID) + if err == nil { + shared.LogWarnf("Storage volumes database already contains an entry for the container.") + } else if err == NoSuchObjectError { + // Insert storage volumes for containers into the database. 
+ _, err := dbStoragePoolVolumeCreate(d.db, ct, storagePoolVolumeTypeContainer, poolID, volumeConfig) + if err != nil { + shared.LogErrorf("Could not insert a storage volume for container \"%s\".", ct) + return err + } + } else { + shared.LogErrorf("Failed to query database: %s", err) + return err } // Unmount the container zfs doesn't really seem to care if we @@ -874,21 +1045,20 @@ ctDataset := fmt.Sprintf("%s/containers/%s", defaultPoolName, ct) oldContainerMntPoint := shared.VarPath("containers", ct) if shared.IsMountPoint(oldContainerMntPoint) { - output, err := tryExec("zfs", "unmount", "-f", ctDataset) + _, err := tryExec("zfs", "unmount", "-f", ctDataset) if err != nil { - return fmt.Errorf("Failed to unmount ZFS filesystem: %s", output) + shared.LogWarnf("Failed to unmount ZFS filesystem via zfs unmount. Trying lazy umount (MNT_DETACH)...") + err := tryUnmount(oldContainerMntPoint, syscall.MNT_DETACH) + if err != nil { + failedUpgradeEntities = append(failedUpgradeEntities, fmt.Sprintf("containers/%s: Failed to umount zfs filesystem.", ct)) + continue + } } } - err = os.Remove(oldContainerMntPoint) - if err != nil { - return err - } + os.Remove(oldContainerMntPoint) - err = os.Remove(oldContainerMntPoint + ".zfs") - if err != nil { - return err - } + os.Remove(oldContainerMntPoint + ".zfs") // Changing the mountpoint property should have actually created // the path but in case it somehow didn't let's do it ourselves. @@ -896,7 +1066,9 @@ newContainerMntPoint := getContainerMountPoint(poolName, ct) err = createContainerMountpoint(newContainerMntPoint, oldContainerMntPoint, doesntMatter) if err != nil { - return err + shared.LogWarnf("Failed to create mountpoint for the container: %s.", newContainerMntPoint) + failedUpgradeEntities = append(failedUpgradeEntities, fmt.Sprintf("containers/%s: Failed to create container mountpoint: %s", ct, err)) + continue } // Set new mountpoint for the container's dataset it will be @@ -907,12 +1079,15 @@ fmt.Sprintf("mountpoint=%s", newContainerMntPoint), ctDataset).CombinedOutput() if err != nil { - return fmt.Errorf("Failed to set new ZFS mountpoint: %s.", output) + shared.LogWarnf("Failed to set new ZFS mountpoint: %s.", output) + failedUpgradeEntities = append(failedUpgradeEntities, fmt.Sprintf("containers/%s: Failed to set new zfs mountpoint: %s", ct, err)) + continue } // Check if we need to account for snapshots for this container. ctSnapshots, err := dbContainerGetSnapshots(d.db, ct) if err != nil { + shared.LogErrorf("Failed to query database") return err } @@ -921,18 +1096,31 @@ // Insert storage volumes for snapshots into the // database. Note that snapshots have already been moved // and symlinked above. So no need to do any work here. - _, err := dbStoragePoolVolumeCreate(d.db, cs, storagePoolVolumeTypeContainer, poolID, volumeConfig) - if err != nil { - shared.LogWarnf("Could not insert a storage volume for snapshot \"%s\".", cs) - continue + _, err := dbStoragePoolVolumeGetTypeID(d.db, cs, storagePoolVolumeTypeContainer, poolID) + if err == nil { + shared.LogWarnf("Storage volumes database already contains an entry for the container.") + } else if err == NoSuchObjectError { + // Insert storage volumes for containers into the database. 
+ _, err := dbStoragePoolVolumeCreate(d.db, cs, storagePoolVolumeTypeContainer, poolID, volumeConfig) + if err != nil { + shared.LogErrorf("Could not insert a storage volume for snapshot \"%s\".", cs) + return err + } + } else { + shared.LogErrorf("Failed to query database: %s", err) + return err } // Create the new mountpoint for snapshots in the new // storage api. newSnapshotMntPoint := getSnapshotMountPoint(poolName, cs) - err = os.MkdirAll(newSnapshotMntPoint, 0711) - if err != nil { - return err + if !shared.PathExists(newSnapshotMntPoint) { + err = os.MkdirAll(newSnapshotMntPoint, 0711) + if err != nil { + shared.LogWarnf("Failed to create mountpoint for snapshot: %s.", newSnapshotMntPoint) + failedUpgradeEntities = append(failedUpgradeEntities, fmt.Sprintf("snapshots/%s: Failed to create mountpoint for snapshot.", cs)) + continue + } } } @@ -941,9 +1129,11 @@ // Create a symlink for this container's snapshots. if len(ctSnapshots) != 0 { newSnapshotsMntPoint := getSnapshotMountPoint(poolName, ct) - err := os.Symlink(newSnapshotsMntPoint, snapshotsPath) - if err != nil { - return err + if !shared.PathExists(newSnapshotsMntPoint) { + err := os.Symlink(newSnapshotsMntPoint, snapshotsPath) + if err != nil { + shared.LogWarnf("Failed to create symlink for snapshots: %s -> %s.", snapshotsPath, newSnapshotsMntPoint) + } } } } @@ -952,41 +1142,262 @@ // move. The tarballs remain in their original location. images := append(imgPublic, imgPrivate...) for _, img := range images { - _, err := dbStoragePoolVolumeCreate(d.db, img, storagePoolVolumeTypeImage, poolID, volumeConfig) - if err != nil { - shared.LogWarnf("Could not insert a storage volume for image \"%s\".", img) - continue + _, err := dbStoragePoolVolumeGetTypeID(d.db, img, storagePoolVolumeTypeImage, poolID) + if err == nil { + shared.LogWarnf("Storage volumes database already contains an entry for the container.") + } else if err == NoSuchObjectError { + // Insert storage volumes for containers into the database. + _, err := dbStoragePoolVolumeCreate(d.db, img, storagePoolVolumeTypeImage, poolID, volumeConfig) + if err != nil { + shared.LogWarnf("Could not insert a storage volume for image \"%s\".", img) + return err + } + } else { + shared.LogErrorf("Failed to query database: %s", err) + return err } imageMntPoint := getImageMountPoint(poolName, img) - err = os.MkdirAll(imageMntPoint, 0700) - if err != nil { - return err + if !shared.PathExists(imageMntPoint) { + err := os.MkdirAll(imageMntPoint, 0700) + if err != nil { + shared.LogWarnf("Failed to create image mountpoint. Proceeding...") + } } oldImageMntPoint := shared.VarPath("images", img+".zfs") imageDataset := fmt.Sprintf("%s/images/%s", defaultPoolName, img) - if shared.PathExists(oldImageMntPoint) { - if shared.IsMountPoint(oldImageMntPoint) { - output, err := tryExec("zfs", "unmount", "-f", imageDataset) + if shared.PathExists(oldImageMntPoint) && shared.IsMountPoint(oldImageMntPoint) { + _, err := tryExec("zfs", "unmount", "-f", imageDataset) + if err != nil { + shared.LogWarnf("Failed to unmount ZFS filesystem via zfs unmount. Trying lazy umount (MNT_DETACH)...") + err := tryUnmount(oldImageMntPoint, syscall.MNT_DETACH) if err != nil { - return fmt.Errorf("Failed to unmount ZFS filesystem: %s", output) + shared.LogWarnf("Failed to unmount ZFS filesystem: %s", err) } } - err = os.Remove(oldImageMntPoint) - if err != nil { - return err - } + os.Remove(oldImageMntPoint) } // Set new mountpoint for the container's dataset it will be // automatically mounted. 
 output, err := exec.Command("zfs", "set", "mountpoint=none", imageDataset).CombinedOutput()
 if err != nil {
- return fmt.Errorf("Failed to set new ZFS mountpoint: %s.", output)
+ shared.LogWarnf("Failed to set new ZFS mountpoint: %s.", output)
 }
 }
+ var finalErr error
+ if len(failedUpgradeEntities) > 0 {
+ finalErr = fmt.Errorf(strings.Join(failedUpgradeEntities, "\n"))
+ }
+
+ return finalErr
+}
+
+func updatePoolPropertyForAllObjects(d *Daemon, poolName string, allcontainers []string) error {
+ // The new storage api enforces that the default storage pool on which
+ // containers are created is set in the default profile. If it isn't
+ // set, then LXD will refuse to create a container until either an
+ // appropriate device including a pool is added to the default profile
+ // or the user explicitly passes the pool the container's storage volume
+ // is supposed to be created on.
+ profiles, err := dbProfiles(d.db)
+ if err == nil {
+ for _, pName := range profiles {
+ pID, p, err := dbProfileGet(d.db, pName)
+ if err != nil {
+ shared.LogErrorf("Could not query database: %s.", err)
+ return err
+ }
+
+ // Check for a root disk device entry
+ k, _ := containerGetRootDiskDevice(p.Devices)
+ if k != "" {
+ if p.Devices[k]["pool"] != "" {
+ continue
+ }
+ p.Devices[k]["pool"] = poolName
+ } else if k == "" && pName == "default" {
+ // The default profile should have a valid root
+ // disk device entry.
+ rootDev := map[string]string{}
+ rootDev["type"] = "disk"
+ rootDev["path"] = "/"
+ rootDev["pool"] = poolName
+ if p.Devices == nil {
+ p.Devices = map[string]map[string]string{}
+ }
+
+ // Make sure that we do not overwrite a device the user
+ // is currently using under the name "root".
+ rootDevName := "root"
+ for i := 0; i < 100; i++ {
+ if p.Devices[rootDevName] == nil {
+ break
+ }
+ rootDevName = fmt.Sprintf("root%d", i)
+ continue
+ }
+ p.Devices[rootDevName] = rootDev
+ }
+
+ // This is nasty, but we need to clear the profile's config and
+ // devices in order to add the new root device including the
+ // newly added storage pool.
+ tx, err := dbBegin(d.db)
+ if err != nil {
+ return err
+ }
+
+ err = dbProfileConfigClear(tx, pID)
+ if err != nil {
+ shared.LogErrorf("Failed to clear old profile configuration for profile %s: %s.", pName, err)
+ tx.Rollback()
+ continue
+ }
+
+ err = dbProfileConfigAdd(tx, pID, p.Config)
+ if err != nil {
+ shared.LogErrorf("Failed to add new profile configuration: %s: %s.", pName, err)
+ tx.Rollback()
+ continue
+ }
+
+ err = dbDevicesAdd(tx, "profile", pID, p.Devices)
+ if err != nil {
+ shared.LogErrorf("Failed to add new root disk device for profile %s: %s.", pName, err)
+ tx.Rollback()
+ continue
+ }
+
+ err = tx.Commit()
+ if err != nil {
+ shared.LogErrorf("Failed to commit database transaction: %s: %s.", pName, err)
+ tx.Rollback()
+ continue
+ }
+ }
+ }
+
+ // When no default profile is detected or some containers do not rely on
+ // the default profile for their root disk device, these containers will
+ // be given a valid local root disk device.
+ for _, ct := range allcontainers {
+ c, err := containerLoadByName(d, ct)
+ if err != nil {
+ continue
+ }
+
+ args := containerArgs{
+ Architecture: c.Architecture(),
+ Config: c.LocalConfig(),
+ Ephemeral: c.IsEphemeral(),
+ CreationDate: c.CreationDate(),
+ LastUsedDate: c.LastUsedDate(),
+ Name: c.Name(),
+ Profiles: c.Profiles(),
+ }
+
+ if c.IsSnapshot() {
+ args.Ctype = cTypeSnapshot
+ } else {
+ args.Ctype = cTypeRegular
+ }
+
+ // Check expanded devices for a valid root entry.
If it exists, + // we skip this container. + expandedDevices := c.ExpandedDevices() + k, _ := containerGetRootDiskDevice(expandedDevices) + if k != "" && expandedDevices[k]["pool"] != "" { + // On partial upgrade the container might already have a + // valid root disk device entry. + if expandedDevices[k]["pool"] == poolName { + continue + } + } + + // Check for a local root disk device entry and set the pool + // property. + localDevices := c.LocalDevices() + k, _ = containerGetRootDiskDevice(localDevices) + if k != "" { + if localDevices[k]["pool"] != "" { + continue + } + localDevices[k]["pool"] = poolName + args.Devices = localDevices + } else { + rootDev := map[string]string{} + rootDev["type"] = "disk" + rootDev["path"] = "/" + rootDev["pool"] = poolName + + // Make sure that we do not overwrite a device the user + // is currently using under the name "root". + rootDevName := "root" + for i := 0; i < 100; i++ { + if localDevices[rootDevName] == nil { + break + } + rootDevName = fmt.Sprintf("root%d", i) + continue + } + localDevices[rootDevName] = rootDev + } + + err = c.Update(args, false) + if err != nil { + continue + } + } + + return nil +} + +func patchStorageApiV1(name string, d *Daemon) error { + pools, err := dbStoragePools(d.db) + if err != nil && err == NoSuchObjectError { + // No pool was configured in the previous update. So we're on a + // pristine LXD instance. + return nil + } else if err != nil { + // Database is screwed. + shared.LogErrorf("Failed to query database: %s", err) + return err + } + + if len(pools) != 1 { + shared.LogWarnf("More than one storage pool found. Not rerunning upgrade.") + return nil + } + + cRegular, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + return err + } + + // Get list of existing snapshots. + cSnapshots, err := dbContainersList(d.db, cTypeSnapshot) + if err != nil { + return err + } + + allcontainers := append(cRegular, cSnapshots...) + err = updatePoolPropertyForAllObjects(d, pools[0], allcontainers) + if err != nil { + return err + } + + return nil +} + +func patchStorageApiDirCleanup(name string, d *Daemon) error { + _, err := dbExec(d.db, "DELETE FROM storage_volumes WHERE type=? AND name NOT IN (SELECT fingerprint FROM images);", storagePoolVolumeTypeImage) + if err != nil { + return err + } + return nil } diff -Nru lxd-2.9.1/lxd/profiles.go lxd-2.9.2/lxd/profiles.go --- lxd-2.9.1/lxd/profiles.go 2017-02-16 17:27:01.000000000 +0000 +++ lxd-2.9.2/lxd/profiles.go 2017-02-21 04:42:34.000000000 +0000 @@ -6,7 +6,6 @@ "fmt" "io/ioutil" "net/http" - "reflect" "strings" "github.com/gorilla/mux" @@ -240,102 +239,6 @@ return doProfileUpdate(d, name, id, profile, req) } -func doProfileUpdate(d *Daemon, name string, id int64, profile *api.Profile, req api.ProfilePut) Response { - // Sanity checks - err := containerValidConfig(d, req.Config, true, false) - if err != nil { - return BadRequest(err) - } - - err = containerValidDevices(req.Devices, true, false) - if err != nil { - return BadRequest(err) - } - - // Get the container list - containers := getContainersWithProfile(d, name) - - // Check that we only change the root disk device for profiles that do - // not have any containers currently using it. 
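Both branches of updatePoolPropertyForAllObjects above probe for a free device name (root, then root0 through root99) before injecting a root disk device, so that a user-defined "root" device is never clobbered. The same probe as a standalone sketch (pickRootDevName is a hypothetical name; fmt is assumed imported as in the surrounding code):

// pickRootDevName returns the first name of the form root, root0,
// root1, ... that is not already taken in devices.
func pickRootDevName(devices map[string]map[string]string) string {
	name := "root"
	for i := 0; i < 100; i++ {
		if devices[name] == nil {
			return name
		}
		name = fmt.Sprintf("root%d", i)
	}
	return name
}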
- for _, v := range req.Devices { - if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" && len(containers) > 0 { - return BadRequest(fmt.Errorf("Cannot change root disk device of a profile if containers are still using it.")) - } - } - - // Update the database - tx, err := dbBegin(d.db) - if err != nil { - return InternalError(err) - } - - if profile.Description != req.Description { - err = dbProfileDescriptionUpdate(tx, id, req.Description) - if err != nil { - tx.Rollback() - return InternalError(err) - } - } - - // Optimize for description-only changes - if reflect.DeepEqual(profile.Config, req.Config) && reflect.DeepEqual(profile.Devices, req.Devices) { - err = txCommit(tx) - if err != nil { - return InternalError(err) - } - - return EmptySyncResponse - } - - err = dbProfileConfigClear(tx, id) - if err != nil { - tx.Rollback() - return InternalError(err) - } - - err = dbProfileConfigAdd(tx, id, req.Config) - if err != nil { - tx.Rollback() - return SmartError(err) - } - - err = dbDevicesAdd(tx, "profile", id, req.Devices) - if err != nil { - tx.Rollback() - return SmartError(err) - } - - err = txCommit(tx) - if err != nil { - return InternalError(err) - } - - // Update all the containers using the profile. Must be done after txCommit due to DB lock. - failures := map[string]error{} - for _, c := range containers { - err = c.Update(containerArgs{ - Architecture: c.Architecture(), - Ephemeral: c.IsEphemeral(), - Config: c.LocalConfig(), - Devices: c.LocalDevices(), - Profiles: c.Profiles()}, true) - - if err != nil { - failures[c.Name()] = err - } - } - - if len(failures) != 0 { - msg := "The following containers failed to update (profile change still saved):\n" - for cname, err := range failures { - msg += fmt.Sprintf(" - %s: %s\n", cname, err) - } - return InternalError(fmt.Errorf("%s", msg)) - } - - return EmptySyncResponse -} - // The handler for the post operation. func profilePost(d *Daemon, r *http.Request) Response { name := mux.Vars(r)["name"] diff -Nru lxd-2.9.1/lxd/profiles_utils.go lxd-2.9.2/lxd/profiles_utils.go --- lxd-2.9.1/lxd/profiles_utils.go 1970-01-01 00:00:00.000000000 +0000 +++ lxd-2.9.2/lxd/profiles_utils.go 2017-02-21 04:42:34.000000000 +0000 @@ -0,0 +1,135 @@ +package main + +import ( + "fmt" + "reflect" + + "github.com/lxc/lxd/shared/api" +) + +func doProfileUpdate(d *Daemon, name string, id int64, profile *api.Profile, req api.ProfilePut) Response { + // Sanity checks + err := containerValidConfig(d, req.Config, true, false) + if err != nil { + return BadRequest(err) + } + + err = containerValidDevices(req.Devices, true, false) + if err != nil { + return BadRequest(err) + } + + containers := getContainersWithProfile(d, name) + + // Check if the root device is supposed to be changed or removed. 
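containerGetRootDiskDevice does the heavy lifting in the check below; as used throughout this release it returns the key and config of the disk device mounted at "/". The real helper lives on the containers side (per the changelog entry "Add fun to detect root disk device"); the following re-implementation is only an illustration of that contract:

// rootDiskDevice scans a device map for the entry backing "/". Map
// iteration order is unspecified in Go, which is acceptable here since
// profile validation rejects more than one disk device per path.
func rootDiskDevice(devices map[string]map[string]string) (string, map[string]string) {
	for k, v := range devices {
		if v["type"] == "disk" && v["path"] == "/" {
			return k, v
		}
	}
	return "", map[string]string{}
}

Reads from a nil map are safe in Go, so returning an empty map on the not-found path is only a stylistic choice; the callers below index the result without a nil check either way.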
+ oldProfileRootDiskDeviceKey, oldProfileRootDiskDevice := containerGetRootDiskDevice(profile.Devices) + _, newProfileRootDiskDevice := containerGetRootDiskDevice(req.Devices) + if len(containers) > 0 && + oldProfileRootDiskDevice["pool"] != "" && + newProfileRootDiskDevice["pool"] == "" || + (oldProfileRootDiskDevice["pool"] != newProfileRootDiskDevice["pool"]) { + // Check for containers using the device + for _, container := range containers { + // Check if the device is locally overridden + localDevices := container.LocalDevices() + k, v := containerGetRootDiskDevice(localDevices) + if k != "" && v["pool"] != "" { + continue + } + + // Check what profile the device comes from + profiles := container.Profiles() + for i := len(profiles) - 1; i >= 0; i-- { + _, profile, err := dbProfileGet(d.db, profiles[i]) + if err != nil { + return InternalError(err) + } + + // Check if we find a match for the device + _, ok := profile.Devices[oldProfileRootDiskDeviceKey] + if ok { + // Found the profile + if profiles[i] == name { + // If it's the current profile, then we can't modify that root device + return BadRequest(fmt.Errorf("At least one container relies on this profile's root disk device.")) + } else { + // If it's not, then move on to the next container + break + } + } + } + } + } + + // Update the database + tx, err := dbBegin(d.db) + if err != nil { + return InternalError(err) + } + + if profile.Description != req.Description { + err = dbProfileDescriptionUpdate(tx, id, req.Description) + if err != nil { + tx.Rollback() + return InternalError(err) + } + } + + // Optimize for description-only changes + if reflect.DeepEqual(profile.Config, req.Config) && reflect.DeepEqual(profile.Devices, req.Devices) { + err = txCommit(tx) + if err != nil { + return InternalError(err) + } + + return EmptySyncResponse + } + + err = dbProfileConfigClear(tx, id) + if err != nil { + tx.Rollback() + return InternalError(err) + } + + err = dbProfileConfigAdd(tx, id, req.Config) + if err != nil { + tx.Rollback() + return SmartError(err) + } + + err = dbDevicesAdd(tx, "profile", id, req.Devices) + if err != nil { + tx.Rollback() + return SmartError(err) + } + + err = txCommit(tx) + if err != nil { + return InternalError(err) + } + + // Update all the containers using the profile. Must be done after txCommit due to DB lock. + failures := map[string]error{} + for _, c := range containers { + err = c.Update(containerArgs{ + Architecture: c.Architecture(), + Ephemeral: c.IsEphemeral(), + Config: c.LocalConfig(), + Devices: c.LocalDevices(), + Profiles: c.Profiles()}, true) + + if err != nil { + failures[c.Name()] = err + } + } + + if len(failures) != 0 { + msg := "The following containers failed to update (profile change still saved):\n" + for cname, err := range failures { + msg += fmt.Sprintf(" - %s: %s\n", cname, err) + } + return InternalError(fmt.Errorf("%s", msg)) + } + + return EmptySyncResponse +} diff -Nru lxd-2.9.1/lxd/storage_btrfs.go lxd-2.9.2/lxd/storage_btrfs.go --- lxd-2.9.1/lxd/storage_btrfs.go 2017-02-16 17:27:01.000000000 +0000 +++ lxd-2.9.2/lxd/storage_btrfs.go 2017-02-21 04:42:34.000000000 +0000 @@ -210,6 +210,13 @@ return err1 } + // Enable quotas + output, err = exec.Command( + "btrfs", "quota", "enable", poolMntPoint).CombinedOutput() + if err != nil { + return fmt.Errorf("Failed to enable quotas on BTRFS pool: %s", output) + } + // Create default subvolumes. 
dummyDir := getContainerMountPoint(s.pool.Name, "") err = s.btrfsPoolVolumeCreate(dummyDir) diff -Nru lxd-2.9.1/lxd/storage_dir.go lxd-2.9.2/lxd/storage_dir.go --- lxd-2.9.1/lxd/storage_dir.go 2017-02-16 17:27:01.000000000 +0000 +++ lxd-2.9.2/lxd/storage_dir.go 2017-02-21 04:42:34.000000000 +0000 @@ -635,6 +635,11 @@ } func (s *storageDir) ImageDelete(fingerprint string) error { + err := s.deleteImageDbPoolVolume(fingerprint) + if err != nil { + return err + } + return nil } diff -Nru lxd-2.9.1/Makefile lxd-2.9.2/Makefile --- lxd-2.9.1/Makefile 2017-02-16 17:27:01.000000000 +0000 +++ lxd-2.9.2/Makefile 2017-02-21 04:42:34.000000000 +0000 @@ -13,27 +13,18 @@ .PHONY: default default: - # Must a few times due to go get race - -go get -t -v -d ./... - -go get -t -v -d ./... - -go get -t -v -d ./... + go get -t -v -d ./... go install -v $(TAGS) $(DEBUG) ./... @echo "LXD built successfully" .PHONY: client client: - # Must a few times due to go get race - -go get -t -v -d ./... - -go get -t -v -d ./... - -go get -t -v -d ./... + go get -t -v -d ./... go install -v $(TAGS) $(DEBUG) ./lxc @echo "LXD client built successfully" .PHONY: update update: - # Must a few times due to go get race - -go get -t -v -d -u ./... - -go get -t -v -d -u ./... go get -t -v -d -u ./... @echo "Dependencies updated" @@ -68,11 +59,11 @@ ln -s ../../../../lxd-$(VERSION) $(TMP)/dist/src/github.com/lxc/lxd # Download dependencies - -cd $(TMP)/lxd-$(VERSION) && GOPATH=$(TMP)/dist go get -t -v -d ./... - -cd $(TMP)/lxd-$(VERSION) && GOPATH=$(TMP)/dist go get -t -v -d ./... - -cd $(TMP)/lxd-$(VERSION) && GOPATH=$(TMP)/dist go get -t -v -d ./... cd $(TMP)/lxd-$(VERSION) && GOPATH=$(TMP)/dist go get -t -v -d ./... + # Workaround for gorilla/mux on Go < 1.7 + cd $(TMP)/lxd-$(VERSION) && GOPATH=$(TMP)/dist go get -v -d github.com/gorilla/context + # Assemble tarball rm $(TMP)/dist/src/github.com/lxc/lxd ln -s ../../../../ $(TMP)/dist/src/github.com/lxc/lxd @@ -92,7 +83,7 @@ msgmerge -U po/$*.po po/$(DOMAIN).pot update-po: - -for lang in $(LINGUAS); do\ + for lang in $(LINGUAS); do\ msgmerge -U $$lang.po po/$(DOMAIN).pot; \ rm -f $$lang.po~; \ done diff -Nru lxd-2.9.1/shared/version/flex.go lxd-2.9.2/shared/version/flex.go --- lxd-2.9.1/shared/version/flex.go 2017-02-16 17:27:01.000000000 +0000 +++ lxd-2.9.2/shared/version/flex.go 2017-02-21 04:42:34.000000000 +0000 @@ -3,7 +3,7 @@ */ package version -var Version = "2.9.1" +var Version = "2.9.2" var UserAgent = "LXD " + Version /* diff -Nru lxd-2.9.1/test/main.sh lxd-2.9.2/test/main.sh --- lxd-2.9.1/test/main.sh 2017-02-16 17:27:01.000000000 +0000 +++ lxd-2.9.2/test/main.sh 2017-02-21 04:42:34.000000000 +0000 @@ -397,6 +397,113 @@ rm -Rf "${1}" } +configure_lvm_loop_device() { + lv_loop_file=$(mktemp -p "${TEST_DIR}" XXXX.lvm) + truncate -s 4G "${lv_loop_file}" + pvloopdev=$(losetup --show -f "${lv_loop_file}") + if [ ! 
-e "${pvloopdev}" ]; then + echo "failed to setup loop" + false + fi + + pvcreate "${pvloopdev}" + + # The following code enables to return a value from a shell function by + # calling the function as: fun VAR1 + + # shellcheck disable=2039 + local __tmp1="${1}" + # shellcheck disable=2039 + local res1="${lv_loop_file}" + if [ "${__tmp1}" ]; then + eval "${__tmp1}='${res1}'" + fi + + # shellcheck disable=2039 + local __tmp2="${2}" + # shellcheck disable=2039 + local res2="${pvloopdev}" + if [ "${__tmp2}" ]; then + eval "${__tmp2}='${res2}'" + fi +} + +deconfigure_lvm_loop_device() { + lv_loop_file="${1}" + loopdev="${2}" + + SUCCESS=0 + # shellcheck disable=SC2034 + for i in $(seq 10); do + pvremove -f "${loopdev}" > /dev/null 2>&1 || true + if losetup -d "${loopdev}"; then + SUCCESS=1 + break + fi + + sleep 0.5 + done + + if [ "${SUCCESS}" = "0" ]; then + echo "Failed to tear down loop device." + false + fi + + rm -f "${lv_loop_file}" +} + +configure_loop_device() { + lv_loop_file=$(mktemp -p "${TEST_DIR}" XXXX.img) + truncate -s 10G "${lv_loop_file}" + pvloopdev=$(losetup --show -f "${lv_loop_file}") + if [ ! -e "${pvloopdev}" ]; then + echo "failed to setup loop" + false + fi + + # The following code enables to return a value from a shell function by + # calling the function as: fun VAR1 + + # shellcheck disable=2039 + local __tmp1="${1}" + # shellcheck disable=2039 + local res1="${lv_loop_file}" + if [ "${__tmp1}" ]; then + eval "${__tmp1}='${res1}'" + fi + + # shellcheck disable=2039 + local __tmp2="${2}" + # shellcheck disable=2039 + local res2="${pvloopdev}" + if [ "${__tmp2}" ]; then + eval "${__tmp2}='${res2}'" + fi +} + +deconfigure_loop_device() { + lv_loop_file="${1}" + loopdev="${2}" + + SUCCESS=0 + # shellcheck disable=SC2034 + for i in $(seq 10); do + if losetup -d "${loopdev}"; then + SUCCESS=1 + break + fi + + sleep 0.5 + done + + if [ "${SUCCESS}" = "0" ]; then + echo "Failed to tear down loop device" + false + fi + + rm -f "${lv_loop_file}" +} + # Must be set before cleanup() TEST_CURRENT=setup TEST_RESULT=failure @@ -479,5 +586,7 @@ run_test test_cpu_profiling "CPU profiling" run_test test_mem_profiling "memory profiling" run_test test_storage "storage" +run_test test_lxd_autoinit "lxd init auto" +run_test test_storage_profiles "storage profiles" TEST_RESULT=success diff -Nru lxd-2.9.1/test/suites/init.sh lxd-2.9.2/test/suites/init.sh --- lxd-2.9.1/test/suites/init.sh 1970-01-01 00:00:00.000000000 +0000 +++ lxd-2.9.2/test/suites/init.sh 2017-02-21 04:42:34.000000000 +0000 @@ -0,0 +1,88 @@ +#!/bin/sh + +test_lxd_autoinit() { + # lxd init --auto + LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) + chmod +x "${LXD_INIT_DIR}" + spawn_lxd "${LXD_INIT_DIR}" false + + ZFS_POOL="lxdtest-$(basename "${LXD_DIR}")-init" + LXD_DIR=${LXD_INIT_DIR} lxd init --auto + + kill_lxd "${LXD_INIT_DIR}" + + # lxd init --auto --storage-backend zfs + if [ "${LXD_BACKEND}" = "zfs" ]; then + LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) + chmod +x "${LXD_INIT_DIR}" + spawn_lxd "${LXD_INIT_DIR}" false + + LXD_DIR=${LXD_INIT_DIR} lxd init --auto --storage-backend zfs + + kill_lxd "${LXD_INIT_DIR}" + fi + + # lxd init --auto --storage-backend zfs --storage-pool + if [ "${LXD_BACKEND}" = "zfs" ]; then + LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) + chmod +x "${LXD_INIT_DIR}" + spawn_lxd "${LXD_INIT_DIR}" false + + # shellcheck disable=SC2154 + configure_loop_device loop_file_1 loop_device_1 + # shellcheck disable=SC2154 + zpool create "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" 
"${loop_device_1}" -m none -O compression=on + LXD_DIR=${LXD_INIT_DIR} lxd init --auto --storage-backend zfs --storage-pool "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" + + kill_lxd "${LXD_INIT_DIR}" + # shellcheck disable=SC2154 + deconfigure_loop_device "${loop_file_1}" "${loop_device_1}" + fi + + # lxd init --auto --storage-backend zfs --storage-pool / + if [ "${LXD_BACKEND}" = "zfs" ]; then + LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) + chmod +x "${LXD_INIT_DIR}" + spawn_lxd "${LXD_INIT_DIR}" false + + configure_loop_device loop_file_1 loop_device_1 + # shellcheck disable=SC2154 + zpool create "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" "${loop_device_1}" -m none -O compression=on + LXD_DIR=${LXD_INIT_DIR} lxd init --auto --storage-backend zfs --storage-pool "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool/non-existing-dataset" + + kill_lxd "${LXD_INIT_DIR}" + deconfigure_loop_device "${loop_file_1}" "${loop_device_1}" + # shellcheck disable=SC2154 + zpool destroy -f "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" + fi + + # lxd init --auto --storage-backend zfs --storage-pool / + if [ "${LXD_BACKEND}" = "zfs" ]; then + LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) + chmod +x "${LXD_INIT_DIR}" + spawn_lxd "${LXD_INIT_DIR}" false + + configure_loop_device loop_file_1 loop_device_1 + # shellcheck disable=SC2154 + zpool create "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" "${loop_device_1}" -f -m none -O compression=on + zfs create -p -o mountpoint=none "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool/existing-dataset" + LXD_DIR=${LXD_INIT_DIR} lxd init --auto --storage-backend zfs --storage-pool "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool/existing-dataset" + + kill_lxd "${LXD_INIT_DIR}" + deconfigure_loop_device "${loop_file_1}" "${loop_device_1}" + # shellcheck disable=SC2154 + zpool destroy -f "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" + fi + + # lxd init --storage-backend zfs --storage-create-loop 1 --storage-pool --auto + if [ "${LXD_BACKEND}" = "zfs" ]; then + LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) + chmod +x "${LXD_INIT_DIR}" + spawn_lxd "${LXD_INIT_DIR}" false + + ZFS_POOL="lxdtest-$(basename "${LXD_DIR}")-init" + LXD_DIR=${LXD_INIT_DIR} lxd init --storage-backend zfs --storage-create-loop 1 --storage-pool "${ZFS_POOL}" --auto + + kill_lxd "${LXD_INIT_DIR}" + fi +} diff -Nru lxd-2.9.1/test/suites/storage_profiles.sh lxd-2.9.2/test/suites/storage_profiles.sh --- lxd-2.9.1/test/suites/storage_profiles.sh 1970-01-01 00:00:00.000000000 +0000 +++ lxd-2.9.2/test/suites/storage_profiles.sh 2017-02-21 04:42:34.000000000 +0000 @@ -0,0 +1,147 @@ +#!/bin/sh + +test_storage_profiles() { + # shellcheck disable=2039 + + LXD_STORAGE_DIR=$(mktemp -d -p "${TEST_DIR}" XXXXXXXXX) + chmod +x "${LXD_STORAGE_DIR}" + spawn_lxd "${LXD_STORAGE_DIR}" false + ( + set -e + # shellcheck disable=2030 + LXD_DIR="${LXD_STORAGE_DIR}" + + HAS_ZFS="dir" + if which zfs >/dev/null 2>&1; then + HAS_ZFS="zfs" + fi + + HAS_BTRFS="dir" + if which zfs >/dev/null 2>&1; then + HAS_BTRFS="btrfs" + fi + + # shellcheck disable=SC1009 + # Create loop file zfs pool. + lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool1" "${HAS_ZFS}" + + # Create loop file btrfs pool. + lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool2" "${HAS_BTRFS}" + + lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool4" dir + + # Set default storage pool for image import. 
+ lxc profile device add default root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+
+ # Import image into default storage pool.
+ ensure_import_testimage
+
+ lxc profile create dummy
+
+ # Create a new profile that provides a root device for some containers.
+ lxc profile device add dummy rootfs disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+
+ # Begin interesting test cases.
+
+ for i in $(seq 1 3); do
+ lxc launch testimage c"${i}" --profile dummy
+ done
+ wait
+
+ # Check that we can't remove or change the root disk device since containers
+ # are actually using it.
+ ! lxc profile device remove dummy rootfs
+ ! lxc profile device set dummy rootfs pool "lxdtest-$(basename "${LXD_DIR}")-pool2"
+
+ # Give all the containers we started their own local root disk device.
+ for i in $(seq 1 2); do
+ lxc config device add c"${i}" root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+ done
+
+ # Try to set new pool. This should fail since there is a single container
+ # that has no local root disk device.
+ ! lxc profile device set dummy rootfs pool "lxdtest-$(basename "${LXD_DIR}")-pool2"
+ # This should work since it doesn't change the pool property.
+ lxc profile device set dummy rootfs pool "lxdtest-$(basename "${LXD_DIR}")-pool1"
+ # Check that we cannot remove the root disk device since there is a single
+ # container that is still using it.
+ ! lxc profile device remove dummy rootfs
+
+ # Give the last container a local root disk device.
+ lxc config device add c3 root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+
+ # Try to set new pool. This should work since the container has a local disk.
+ lxc profile device set dummy rootfs pool "lxdtest-$(basename "${LXD_DIR}")-pool2"
+ lxc profile device set dummy rootfs pool "lxdtest-$(basename "${LXD_DIR}")-pool1"
+ # Check that we can now remove the root disk device since no container is
+ # actually using it.
+ lxc profile device remove dummy rootfs
+
+ # Add back a root device to the profile.
+ lxc profile device add dummy rootfs1 disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+
+ # Try to add another root device to the profile that tries to set a pool
+ # property. This should fail. This is also a test for whether it is possible
+ # to put multiple disk devices on the same path. This must fail!
+ ! lxc profile device add dummy rootfs2 disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool2"
+
+ # Add another root device to the profile that does not set a pool property.
+ # This should not work since it would use the same path.
+ ! lxc profile device add dummy rootfs3 disk path="/"
+
+ # Create a second profile.
+ lxc profile create dummyDup
+ lxc profile device add dummyDup rootfs1 disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+
+ # Create a third profile.
+ lxc profile create dummyNoDup
+ lxc profile device add dummyNoDup rootfs2 disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool2"
+
+ # Verify that we cannot create a container with profiles that have
+ # contradicting root devices.
+ ! lxc launch testimage cConflictingProfiles -p dummy -p dummyDup -p dummyNoDup
+
+ # Verify that we can create a container with profiles that have
+ # contradicting root devices if the container has a local root device set.
+ lxc launch testimage cConflictingProfiles2 -s "lxdtest-$(basename "${LXD_DIR}")-pool2" -p dummy -p dummyDup -p dummyNoDup
+
+ # Verify that we cannot remove the local root disk device if the profiles
+ # have contradicting root disk devices.
+ ! lxc config device remove cConflictingProfiles2 root
+
+ # Check that we cannot assign conflicting profiles to a container that
+ # relies on another profile's root disk device.
+ lxc launch testimage cOnDefault
+ ! lxc profile assign cOnDefault default,dummyDup,dummyNoDup
+
+ # Verify that we can create a container with two profiles that specify the
+ # same root disk device.
+ lxc launch testimage cNonConflictingProfiles -p dummy -p dummyDup
+
+ # Try to remove the root disk device from one of the profiles.
+ lxc profile device remove dummy rootfs1
+
+ # Try to remove the root disk device from the second profile.
+ ! lxc profile device remove dummyDup rootfs1
+
+ # Test that we can't remove the root disk device from the container's config
+ # when the profile it is attached to specifies no root device.
+ for i in $(seq 1 3); do
+ ! lxc config device remove c"${i}" root
+ # Must fail.
+ ! lxc profile assign c"${i}" dummyDup,dummyNoDup
+ done
+
+ lxc delete -f cConflictingProfiles2
+ lxc delete -f cNonConflictingProfiles
+ lxc delete -f cOnDefault
+ for i in $(seq 1 3); do
+ lxc delete -f c"${i}"
+ done
+
+ )
+
+ # shellcheck disable=SC2031
+ LXD_DIR="${LXD_DIR}"
+ kill_lxd "${LXD_STORAGE_DIR}"
+}
diff -Nru lxd-2.9.1/test/suites/storage.sh lxd-2.9.2/test/suites/storage.sh
--- lxd-2.9.1/test/suites/storage.sh 2017-02-16 17:27:01.000000000 +0000
+++ lxd-2.9.2/test/suites/storage.sh 2017-02-21 04:42:34.000000000 +0000
@@ -1,113 +1,8 @@
 #!/bin/sh
 
-configure_lvm_loop_device() {
- lv_loop_file=$(mktemp -p "${TEST_DIR}" XXXX.lvm)
- truncate -s 4G "${lv_loop_file}"
- pvloopdev=$(losetup --show -f "${lv_loop_file}")
- if [ ! -e "${pvloopdev}" ]; then
- echo "failed to setup loop"
- false
- fi
-
- pvcreate "${pvloopdev}"
-
- # The following code enables to return a value from a shell function by
- # calling the function as: fun VAR1
-
- # shellcheck disable=2039
- local __tmp1="${1}"
- # shellcheck disable=2039
- local res1="${lv_loop_file}"
- if [ "${__tmp1}" ]; then
- eval "${__tmp1}='${res1}'"
- fi
-
- # shellcheck disable=2039
- local __tmp2="${2}"
- # shellcheck disable=2039
- local res2="${pvloopdev}"
- if [ "${__tmp2}" ]; then
- eval "${__tmp2}='${res2}'"
- fi
-}
-
-deconfigure_lvm_loop_device() {
- lv_loop_file="${1}"
- loopdev="${2}"
-
- SUCCESS=0
- # shellcheck disable=SC2034
- for i in $(seq 10); do
- pvremove -f "${loopdev}" > /dev/null 2>&1 || true
- if losetup -d "${loopdev}"; then
- SUCCESS=1
- break
- fi
-
- sleep 0.5
- done
-
- if [ "${SUCCESS}" = "0" ]; then
- echo "Failed to tear down loop device."
- false
- fi
-
- rm -f "${lv_loop_file}"
-}
-
-configure_loop_device() {
- lv_loop_file=$(mktemp -p "${TEST_DIR}" XXXX.img)
- truncate -s 10G "${lv_loop_file}"
- pvloopdev=$(losetup --show -f "${lv_loop_file}")
- if [ !
-e "${pvloopdev}" ]; then - echo "failed to setup loop" - false - fi - - # The following code enables to return a value from a shell function by - # calling the function as: fun VAR1 - - # shellcheck disable=2039 - local __tmp1="${1}" - # shellcheck disable=2039 - local res1="${lv_loop_file}" - if [ "${__tmp1}" ]; then - eval "${__tmp1}='${res1}'" - fi - - # shellcheck disable=2039 - local __tmp2="${2}" +test_storage() { # shellcheck disable=2039 - local res2="${pvloopdev}" - if [ "${__tmp2}" ]; then - eval "${__tmp2}='${res2}'" - fi -} - -deconfigure_loop_device() { - lv_loop_file="${1}" - loopdev="${2}" - - SUCCESS=0 - # shellcheck disable=SC2034 - for i in $(seq 10); do - if losetup -d "${loopdev}"; then - SUCCESS=1 - break - fi - - sleep 0.5 - done - - if [ "${SUCCESS}" = "0" ]; then - echo "Failed to tear down loop device" - false - fi - - rm -f "${lv_loop_file}" -} -test_storage() { LXD_STORAGE_DIR=$(mktemp -d -p "${TEST_DIR}" XXXXXXXXX) chmod +x "${LXD_STORAGE_DIR}" spawn_lxd "${LXD_STORAGE_DIR}" false @@ -116,151 +11,168 @@ # shellcheck disable=2030 LXD_DIR="${LXD_STORAGE_DIR}" - # Only create zfs pools on 64 bit arches. I think getconf LONG_BIT should - # even work when running a 32bit userspace on a 64 bit kernel. - ARCH=$(getconf LONG_BIT) - BACKEND=btrfs - if [ "${ARCH}" = "64" ]; then - BACKEND=zfs - fi - + # shellcheck disable=SC1009 + if which zfs >/dev/null 2>&1; then # Create loop file zfs pool. - lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool1" "${BACKEND}" + lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool1" zfs - if [ "${BACKEND}" = "zfs" ]; then - # Let LXD use an already existing dataset. - zfs create -p -o mountpoint=none "lxdtest-$(basename "${LXD_DIR}")-pool1/existing-dataset-as-pool" - lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool7" "${BACKEND}" source="lxdtest-$(basename "${LXD_DIR}")-pool1/existing-dataset-as-pool" - - # Let LXD use an already existing storage pool. - configure_loop_device loop_file_4 loop_device_4 - # shellcheck disable=SC2154 - zpool create "lxdtest-$(basename "${LXD_DIR}")-pool9-existing-pool" "${loop_device_4}" -f -m none -O compression=on - lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool9" "${BACKEND}" source="lxdtest-$(basename "${LXD_DIR}")-pool9-existing-pool" - - # Let LXD create a new dataset and use as pool. - lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool8" "${BACKEND}" source="lxdtest-$(basename "${LXD_DIR}")-pool1/non-existing-dataset-as-pool" - fi - - # Create device backed zfs pool - configure_loop_device loop_file_1 loop_device_1 - # shellcheck disable=SC2154 - lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool2" "${BACKEND}" source="${loop_device_1}" - - # Create loop file btrfs pool. - lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool3" btrfs - - # Create device backed btrfs pool. - configure_loop_device loop_file_2 loop_device_2 - # shellcheck disable=SC2154 - lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool4" btrfs source="${loop_device_2}" + # Let LXD use an already existing dataset. + zfs create -p -o mountpoint=none "lxdtest-$(basename "${LXD_DIR}")-pool1/existing-dataset-as-pool" + lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool7" zfs source="lxdtest-$(basename "${LXD_DIR}")-pool1/existing-dataset-as-pool" + + # Let LXD use an already existing storage pool. 
+ configure_loop_device loop_file_4 loop_device_4 + # shellcheck disable=SC2154 + zpool create "lxdtest-$(basename "${LXD_DIR}")-pool9-existing-pool" "${loop_device_4}" -f -m none -O compression=on + lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool9" zfs source="lxdtest-$(basename "${LXD_DIR}")-pool9-existing-pool" + + # Let LXD create a new dataset and use as pool. + lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool8" zfs source="lxdtest-$(basename "${LXD_DIR}")-pool1/non-existing-dataset-as-pool" + + # Create device backed zfs pool + configure_loop_device loop_file_1 loop_device_1 + # shellcheck disable=SC2154 + lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool2" zfs source="${loop_device_1}" + fi + + if which btrfs >/dev/null 2>&1; then + # Create loop file btrfs pool. + lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool3" btrfs + + # Create device backed btrfs pool. + configure_loop_device loop_file_2 loop_device_2 + # shellcheck disable=SC2154 + lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool4" btrfs source="${loop_device_2}" + fi # Create dir pool. lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool5" dir - # Create lvm pool. - configure_lvm_loop_device loop_file_3 loop_device_3 - # shellcheck disable=SC2154 - lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool6" lvm source="${loop_device_3}" + if which lvdisplay >/dev/null 2>&1; then + # Create lvm pool. + configure_lvm_loop_device loop_file_3 loop_device_3 + # shellcheck disable=SC2154 + lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool6" lvm source="${loop_device_3}" + fi # Set default storage pool for image import. - lxc profile device add default root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1" + lxc profile device add default root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool5" # Import image into default storage pool. ensure_import_testimage # Muck around with some containers on various pools. 
- lxc init testimage c1pool1 -s "lxdtest-$(basename "${LXD_DIR}")-pool1" - lxc list -c b c1pool1 | grep "lxdtest-$(basename "${LXD_DIR}")-pool1" - lxc init testimage c2pool2 -s "lxdtest-$(basename "${LXD_DIR}")-pool2" - lxc list -c b c2pool2 | grep "lxdtest-$(basename "${LXD_DIR}")-pool2" - - lxc launch testimage c3pool1 -s "lxdtest-$(basename "${LXD_DIR}")-pool1" - lxc list -c b c3pool1 | grep "lxdtest-$(basename "${LXD_DIR}")-pool1" - lxc launch testimage c4pool2 -s "lxdtest-$(basename "${LXD_DIR}")-pool2" - lxc list -c b c4pool2 | grep "lxdtest-$(basename "${LXD_DIR}")-pool2" - - lxc init testimage c5pool3 -s "lxdtest-$(basename "${LXD_DIR}")-pool3" - lxc list -c b c5pool3 | grep "lxdtest-$(basename "${LXD_DIR}")-pool3" - lxc init testimage c6pool4 -s "lxdtest-$(basename "${LXD_DIR}")-pool4" - lxc list -c b c6pool4 | grep "lxdtest-$(basename "${LXD_DIR}")-pool4" - - lxc launch testimage c7pool3 -s "lxdtest-$(basename "${LXD_DIR}")-pool3" - lxc list -c b c7pool3 | grep "lxdtest-$(basename "${LXD_DIR}")-pool3" - lxc launch testimage c8pool4 -s "lxdtest-$(basename "${LXD_DIR}")-pool4" - lxc list -c b c8pool4 | grep "lxdtest-$(basename "${LXD_DIR}")-pool4" + if which zfs >/dev/null 2>&1; then + lxc init testimage c1pool1 -s "lxdtest-$(basename "${LXD_DIR}")-pool1" + lxc list -c b c1pool1 | grep "lxdtest-$(basename "${LXD_DIR}")-pool1" + + lxc init testimage c2pool2 -s "lxdtest-$(basename "${LXD_DIR}")-pool2" + lxc list -c b c2pool2 | grep "lxdtest-$(basename "${LXD_DIR}")-pool2" + + lxc launch testimage c3pool1 -s "lxdtest-$(basename "${LXD_DIR}")-pool1" + lxc list -c b c3pool1 | grep "lxdtest-$(basename "${LXD_DIR}")-pool1" + + lxc launch testimage c4pool2 -s "lxdtest-$(basename "${LXD_DIR}")-pool2" + lxc list -c b c4pool2 | grep "lxdtest-$(basename "${LXD_DIR}")-pool2" + fi + + if which btrfs >/dev/null 2>&1; then + lxc init testimage c5pool3 -s "lxdtest-$(basename "${LXD_DIR}")-pool3" + lxc list -c b c5pool3 | grep "lxdtest-$(basename "${LXD_DIR}")-pool3" + lxc init testimage c6pool4 -s "lxdtest-$(basename "${LXD_DIR}")-pool4" + lxc list -c b c6pool4 | grep "lxdtest-$(basename "${LXD_DIR}")-pool4" + + lxc launch testimage c7pool3 -s "lxdtest-$(basename "${LXD_DIR}")-pool3" + lxc list -c b c7pool3 | grep "lxdtest-$(basename "${LXD_DIR}")-pool3" + lxc launch testimage c8pool4 -s "lxdtest-$(basename "${LXD_DIR}")-pool4" + lxc list -c b c8pool4 | grep "lxdtest-$(basename "${LXD_DIR}")-pool4" + fi lxc init testimage c9pool5 -s "lxdtest-$(basename "${LXD_DIR}")-pool5" lxc list -c b c9pool5 | grep "lxdtest-$(basename "${LXD_DIR}")-pool5" - lxc init testimage c10pool6 -s "lxdtest-$(basename "${LXD_DIR}")-pool6" - lxc list -c b c10pool6 | grep "lxdtest-$(basename "${LXD_DIR}")-pool6" lxc launch testimage c11pool5 -s "lxdtest-$(basename "${LXD_DIR}")-pool5" lxc list -c b c11pool5 | grep "lxdtest-$(basename "${LXD_DIR}")-pool5" - lxc launch testimage c12pool6 -s "lxdtest-$(basename "${LXD_DIR}")-pool6" - lxc list -c b c12pool6 | grep "lxdtest-$(basename "${LXD_DIR}")-pool6" - if [ "${BACKEND}" = "zfs" ]; then - lxc launch testimage c13pool7 -s "lxdtest-$(basename "${LXD_DIR}")-pool7" - lxc launch testimage c14pool7 -s "lxdtest-$(basename "${LXD_DIR}")-pool7" + if which lvdisplay >/dev/null 2>&1; then + lxc init testimage c10pool6 -s "lxdtest-$(basename "${LXD_DIR}")-pool6" + lxc list -c b c10pool6 | grep "lxdtest-$(basename "${LXD_DIR}")-pool6" + + lxc launch testimage c12pool6 -s "lxdtest-$(basename "${LXD_DIR}")-pool6" + lxc list -c b c12pool6 | grep "lxdtest-$(basename "${LXD_DIR}")-pool6" + fi 
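These suites now gate each storage backend on the presence of its userspace tool (which zfs, which btrfs, which lvdisplay) instead of guessing from the architecture. The Go-side equivalent of that probe is exec.LookPath; a minimal sketch (haveTool is a hypothetical name, not part of the LXD tree):

package main

import "os/exec"

// haveTool reports whether a backend's userspace tool (zfs, btrfs,
// lvdisplay, ...) is resolvable in PATH, mirroring the
// "which TOOL >/dev/null 2>&1" gate used in these tests.
func haveTool(name string) bool {
	_, err := exec.LookPath(name)
	return err == nil
}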
- lxc launch testimage c15pool8 -s "lxdtest-$(basename "${LXD_DIR}")-pool8" - lxc launch testimage c16pool8 -s "lxdtest-$(basename "${LXD_DIR}")-pool8" + if which zfs >/dev/null 2>&1; then + lxc launch testimage c13pool7 -s "lxdtest-$(basename "${LXD_DIR}")-pool7" + lxc launch testimage c14pool7 -s "lxdtest-$(basename "${LXD_DIR}")-pool7" - lxc launch testimage c17pool9 -s "lxdtest-$(basename "${LXD_DIR}")-pool9" - lxc launch testimage c18pool9 -s "lxdtest-$(basename "${LXD_DIR}")-pool9" + lxc launch testimage c15pool8 -s "lxdtest-$(basename "${LXD_DIR}")-pool8" + lxc launch testimage c16pool8 -s "lxdtest-$(basename "${LXD_DIR}")-pool8" + + lxc launch testimage c17pool9 -s "lxdtest-$(basename "${LXD_DIR}")-pool9" + lxc launch testimage c18pool9 -s "lxdtest-$(basename "${LXD_DIR}")-pool9" fi - lxc delete -f c1pool1 - lxc delete -f c2pool2 + if which zfs >/dev/null 2>&1; then + lxc delete -f c1pool1 + lxc delete -f c3pool1 - lxc delete -f c3pool1 - lxc delete -f c4pool2 + lxc delete -f c4pool2 + lxc delete -f c2pool2 + fi - lxc delete -f c5pool3 - lxc delete -f c6pool4 + if which btrfs >/dev/null 2>&1; then + lxc delete -f c5pool3 + lxc delete -f c7pool3 - lxc delete -f c7pool3 - lxc delete -f c8pool4 + lxc delete -f c8pool4 + lxc delete -f c6pool4 + fi lxc delete -f c9pool5 - lxc delete -f c10pool6 - lxc delete -f c11pool5 - lxc delete -f c12pool6 - if [ "${BACKEND}" = "zfs" ]; then - lxc delete -f c13pool7 - lxc delete -f c14pool7 + if which lvdisplay >/dev/null 2>&1; then + lxc delete -f c10pool6 + lxc delete -f c12pool6 + fi + + if which zfs >/dev/null 2>&1; then + lxc delete -f c13pool7 + lxc delete -f c14pool7 - lxc delete -f c15pool8 - lxc delete -f c16pool8 + lxc delete -f c15pool8 + lxc delete -f c16pool8 - lxc delete -f c17pool9 - lxc delete -f c18pool9 + lxc delete -f c17pool9 + lxc delete -f c18pool9 fi lxc image delete testimage - if [ "${BACKEND}" = "zfs" ]; then - lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool7" - lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool8" - lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool9" - # shellcheck disable=SC2154 - deconfigure_loop_device "${loop_file_4}" "${loop_device_4}" - fi - - lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool2" - # shellcheck disable=SC2154 - deconfigure_loop_device "${loop_file_1}" "${loop_device_1}" - - lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool4" - # shellcheck disable=SC2154 - deconfigure_loop_device "${loop_file_2}" "${loop_device_2}" - - lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool6" - # shellcheck disable=SC2154 - deconfigure_lvm_loop_device "${loop_file_3}" "${loop_device_3}" + if which zfs >/dev/null 2>&1; then + lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool7" + lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool8" + lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool9" + # shellcheck disable=SC2154 + deconfigure_loop_device "${loop_file_4}" "${loop_device_4}" + + lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool2" + # shellcheck disable=SC2154 + deconfigure_loop_device "${loop_file_1}" "${loop_device_1}" + fi + + if which btrfs >/dev/null 2>&1; then + lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool4" + # shellcheck disable=SC2154 + deconfigure_loop_device "${loop_file_2}" "${loop_device_2}" + fi + + if which lvdisplay >/dev/null 2>&1; then + lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool6" + # shellcheck disable=SC2154 + deconfigure_lvm_loop_device "${loop_file_3}" "${loop_device_3}" + fi ) # 
shellcheck disable=SC2031