diff -Nru snapd-2.62+23.10/.github/workflows/nightly.yaml snapd-2.63+23.10/.github/workflows/nightly.yaml --- snapd-2.62+23.10/.github/workflows/nightly.yaml 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/.github/workflows/nightly.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,94 @@ +name: Nightly static code analysis + +on: + workflow_dispatch: + schedule: + - cron: '30 0 * * *' + +jobs: + + tics: + runs-on: ubuntu-22.04 + env: + GOPATH: ${{ github.workspace }} + # Set PATH to ignore the load of magic binaries from /usr/local/bin and + # to use the go snap automatically. Note that we install go from the + # snap in a step below. Without this we get the GitHub-controlled latest + # version of go. + PATH: /snap/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:${{ github.workspace }}/bin + GOROOT: "" + strategy: + matrix: + gochannel: + - 1.18 + unit-scenario: + - normal + + steps: + - name: Checkout code + uses: actions/checkout@v3 + with: + # needed for git commit history + fetch-depth: 0 + # NOTE: checkout the code in a fixed location, even for forks, as this + # is relevant for go's import system. + path: ./src/github.com/snapcore/snapd + + - name: Download Debian dependencies + run: | + sudo apt clean + sudo apt update + sudo apt build-dep -y "${{ github.workspace }}/src/github.com/snapcore/snapd" + + - name: Install the go snap + run: | + sudo snap install --classic --channel="${{ matrix.gochannel }}" go + + - name: Get deps + run: | + cd "${{ github.workspace }}/src/github.com/snapcore/snapd" + ./get-deps.sh + + - name: Build C + run: | + cd "${{ github.workspace }}/src/github.com/snapcore/snapd/cmd" + ./autogen.sh + make -j$(nproc) + + - name: Build Go + run: | + go build github.com/snapcore/snapd/... + + - name: Test C + run: | + cd "${{ github.workspace }}/src/github.com/snapcore/snapd/cmd" + make check + + - name: Reset code coverage data + run: | + rm -rf "${{ github.workspace }}/src/github.com/snapcore/snapd/.coverage" + + - name: Test Go with coverage + run: | + go install github.com/boumenot/gocover-cobertura@latest + + cd "${{ github.workspace }}/src/github.com/snapcore/snapd" + COVERAGE_OUT=.coverage/coverage.txt ./run-checks --unit + gocover-cobertura < .coverage/coverage.txt > .coverage/coverage.xml + + - name: TICS scan + run: | + set -x + export TICSAUTHTOKEN="${{ secrets.TICSAUTHTOKEN }}" + + # Install the TICS + curl --silent --show-error "https://canonical.tiobe.com/tiobeweb/TICS/api/public/v1/fapi/installtics/Script?cfg=default&platform=linux&url=https://canonical.tiobe.com/tiobeweb/TICS/" > install_tics.sh + . 
./install_tics.sh + + TICSQServer -project snapd -tmpdir /tmp/tics -branchdir "${{ github.workspace }}/src/github.com/snapcore/snapd" + + - name: Uploading TICS logs + uses: actions/upload-artifact@v4 + with: + name: tics-logs.tar.gz + path: tics-logs.tar.gz diff -Nru snapd-2.62+23.10/.github/workflows/test.yaml snapd-2.63+23.10/.github/workflows/test.yaml --- snapd-2.62+23.10/.github/workflows/test.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/.github/workflows/test.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -236,6 +236,27 @@ exit 1 fi + branch-static-checks: + runs-on: ubuntu-latest + needs: [cache-build-deps] + if: github.ref != 'refs/heads/master' + steps: + + - name: Checkout code + uses: actions/checkout@v3 + with: + # needed for git commit history + fetch-depth: 0 + + - name: check-branch-ubuntu-daily-spread + run: | + # Compare the daily system in master and in the current branch + wget -q -O test_master.yaml https://raw.githubusercontent.com/snapcore/snapd/master/.github/workflows/test.yaml + system_daily="$(yq '.jobs.spread.strategy.matrix.include.[] | select(.group == "ubuntu-daily") | .systems' test_master.yaml)" + current_daily="$(yq '.jobs.spread.strategy.matrix.include.[] | select(.group == "ubuntu-daily") | .systems' .github/workflows/test.yaml)" + test "$system_daily" == "$current_daily" + shell: bash + unit-tests: needs: [static-checks] runs-on: ubuntu-22.04 @@ -260,7 +281,6 @@ - snapd_debug - withbootassetstesting - nosecboot - - nobolt - faultinject steps: @@ -351,12 +371,6 @@ # ${{ github.workspace }}/bin/govendor remove +unused SKIP_DIRTY_CHECK=1 GO_BUILD_TAGS=nosecboot ./run-checks --unit - - name: Test Go (nobolt) - if: ${{ matrix.unit-scenario == 'nobolt' }} - run: | - cd ${{ github.workspace }}/src/github.com/snapcore/snapd || exit 1 - SKIP_DIRTY_CHECK=1 GO_BUILD_TAGS=nobolt ./run-checks --unit - - name: Test Go (faultinject) if: ${{ matrix.unit-scenario == 'faultinject' }} run: | @@ -787,4 +801,4 @@ uses: actions/cache/save@v3 with: path: "${{ github.workspace }}/.test-results" - key: "${{ github.job }}-results-${{ github.run_id }}-${{ matrix.system }}-${{ github.run_attempt }}" \ No newline at end of file + key: "${{ github.job }}-results-${{ github.run_id }}-${{ matrix.system }}-${{ github.run_attempt }}" diff -Nru snapd-2.62+23.10/.golangci.yml snapd-2.63+23.10/.golangci.yml --- snapd-2.62+23.10/.golangci.yml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/.golangci.yml 2024-04-24 00:00:39.000000000 +0000 @@ -83,7 +83,7 @@ # [deprecated] comma-separated list of pairs of the form pkg:regex # the regex is used to ignore names within pkg. (default "fmt:.*"). 
# see https://github.com/kisielk/errcheck#the-deprecated-method for details - ignore: fmt:.*,io/ioutil:^Read.* + ignore: fmt:.* # path to a file containing a list of functions to exclude from checking # see https://github.com/kisielk/errcheck#excluding-functions for details diff -Nru snapd-2.62+23.10/NEWS.md snapd-2.63+23.10/NEWS.md --- snapd-2.62+23.10/NEWS.md 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/NEWS.md 2024-04-24 00:00:39.000000000 +0000 @@ -1,3 +1,33 @@ +# New in snapd 2.63: +* Support for snap services to show the current status of user services (experimental) +* Refresh app awareness: record snap-run-inhibit notice when starting app from snap that is busy with refresh (experimental) +* Refresh app awareness: use warnings as fallback for desktop notifications (experimental) +* Aspect based configuration: make request fields in the aspect-bundle's rules optional (experimental) +* Aspect based configuration: make map keys conform to the same format as path sub-keys (experimental) +* Aspect based configuration: make unset and set behaviour similar to configuration options (experimental) +* Aspect based configuration: limit nesting level for setting value (experimental) +* Components: use symlinks to point active snap component revisions +* Components: add model assertion support for components +* Components: fix to ensure local component installation always gets a new revision number +* Add basic support for a CIFS remote filesystem-based home directory +* Add support for AppArmor profile kill mode to avoid snap-confine error +* Allow more than one interface to grant access to the same API endpoint or notice type +* Allow all snapd service's control group processes to send systemd notifications to prevent warnings flooding the log +* Enable not preseeded single boot install +* Update secboot to handle new sbatlevel +* Fix to not use cgroup for non-strict confined snaps (devmode, classic) +* Fix two race conditions relating to freedesktop notifications +* Fix missing tunables in snap-update-ns AppArmor template +* Fix rejection of snapd snap udev command line by older host snap-device-helper +* Rework seccomp allow/deny list +* Clean up files removed by gadgets +* Remove non-viable boot chains to avoid secboot failure +* posix_mq interface: add support for missing time64 mqueue syscalls mq_timedreceive_time64 and mq_timedsend_time64 +* password-manager-service interface: allow kwalletd version 6 +* kubernetes-support interface: allow SOCK_SEQPACKET sockets +* system-observe interface: allow listing systemd units and their properties +* opengl interface: enable use of nvidia container toolkit CDI config generation + # New in snapd 2.62: * Aspects based configuration schema support (experimental) * Refresh app awareness support for UI (experimental) diff -Nru snapd-2.62+23.10/advisor/backend_bolt.go snapd-2.63+23.10/advisor/backend_bolt.go --- snapd-2.62+23.10/advisor/backend_bolt.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/advisor/backend_bolt.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,4 @@ // -*- Mode: Go; indent-tabs-mode: t -*- -//go:build !nobolt /* * Copyright (C) 2018-2024 Canonical Ltd @@ -26,7 +25,7 @@ "path/filepath" "time" - "github.com/snapcore/bolt" + "go.etcd.io/bbolt" "github.com/snapcore/snapd/dirs" "github.com/snapcore/snapd/osutil" @@ -40,10 +39,10 @@ type writer struct { fn string - db *bolt.DB - tx *bolt.Tx - cmdBucket *bolt.Bucket - pkgBucket *bolt.Bucket + db *bbolt.DB + tx *bbolt.Tx + cmdBucket *bbolt.Bucket + pkgBucket 
*bbolt.Bucket } // Create opens the commands database for writing, and starts a @@ -57,7 +56,7 @@ fn: dirs.SnapCommandsDB + "." + randutil.RandomString(12) + "~", } - t.db, err = bolt.Open(t.fn, 0644, &bolt.Options{Timeout: 1 * time.Second}) + t.db, err = bbolt.Open(t.fn, 0644, &bbolt.Options{Timeout: 1 * time.Second}) if err != nil { return nil, err } @@ -178,7 +177,7 @@ // DumpCommands returns the whole database as a map. For use in // testing and debugging. func DumpCommands() (map[string]string, error) { - db, err := bolt.Open(dirs.SnapCommandsDB, 0644, &bolt.Options{ + db, err := bbolt.Open(dirs.SnapCommandsDB, 0644, &bbolt.Options{ ReadOnly: true, Timeout: 1 * time.Second, }) @@ -208,7 +207,7 @@ } type boltFinder struct { - *bolt.DB + *bbolt.DB } // Open the database for reading. @@ -220,7 +219,7 @@ if !osutil.FileExists(dirs.SnapCommandsDB) { return nil, os.ErrNotExist } - db, err := bolt.Open(dirs.SnapCommandsDB, 0644, &bolt.Options{ + db, err := bbolt.Open(dirs.SnapCommandsDB, 0644, &bbolt.Options{ ReadOnly: true, Timeout: 1 * time.Second, }) diff -Nru snapd-2.62+23.10/advisor/backend_nobolt.go snapd-2.63+23.10/advisor/backend_nobolt.go --- snapd-2.62+23.10/advisor/backend_nobolt.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/advisor/backend_nobolt.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -//go:build nobolt - -// -*- Mode: Go; indent-tabs-mode: t -*- -/* - * Copyright (C) 2024 Canonical Ltd - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 3 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - */ - -package advisor - -// Create fails with ErrNotSupported. -func Create() (CommandDB, error) { - return nil, ErrNotSupported -} - -// DumpCommands fails with ErrNotSupported. -func DumpCommands() (map[string]string, error) { - return nil, ErrNotSupported -} - -// Open fails with ErrNotSupported. 
-func Open() (Finder, error) { - return nil, ErrNotSupported -} diff -Nru snapd-2.62+23.10/advisor/cmdfinder_test.go snapd-2.63+23.10/advisor/cmdfinder_test.go --- snapd-2.62+23.10/advisor/cmdfinder_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/advisor/cmdfinder_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,4 @@ // -*- Mode: Go; indent-tabs-mode: t -*- -//go:build !nobolt /* * Copyright (C) 2018 Canonical Ltd diff -Nru snapd-2.62+23.10/advisor/pkgfinder_test.go snapd-2.63+23.10/advisor/pkgfinder_test.go --- snapd-2.62+23.10/advisor/pkgfinder_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/advisor/pkgfinder_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,4 @@ // -*- Mode: Go; indent-tabs-mode: t -*- -//go:build !nobolt /* * Copyright (C) 2018 Canonical Ltd diff -Nru snapd-2.62+23.10/aspects/aspects.go snapd-2.63+23.10/aspects/aspects.go --- snapd-2.62+23.10/aspects/aspects.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/aspects/aspects.go 2024-04-24 00:00:39.000000000 +0000 @@ -269,16 +269,6 @@ return nil, errors.New("each aspect rule should be a map") } - requestRaw, ok := ruleMap["request"] - if !ok || requestRaw == "" { - return nil, errors.New(`aspect rules must have a "request" field`) - } - - request, ok := requestRaw.(string) - if !ok { - return nil, errors.New(`"request" must be a string`) - } - storageRaw, ok := ruleMap["storage"] if !ok || storageRaw == "" { return nil, errors.New(`aspect rules must have a "storage" field`) @@ -289,6 +279,19 @@ return nil, errors.New(`"storage" must be a string`) } + requestRaw, ok := ruleMap["request"] + if !ok { + // if omitted the "request" field defaults to the same as the "storage" + requestRaw = storage + } else if requestRaw == "" { + return nil, errors.New(`aspect rules' "request" field must be non-empty, if it exists`) + } + + request, ok := requestRaw.(string) + if !ok { + return nil, errors.New(`"request" must be a string`) + } + if err := validateRequestStoragePair(request, storage); err != nil { return nil, err } @@ -441,12 +444,58 @@ value interface{} } +// maxValueDepth is the limit on a value's nestedness. Creating a highly nested +// JSON value only requires a few bytes per level, but when recursively traversing +// such a value, each level requires about 2Kb stack. Prevent excessive stack +// usage by limiting the recursion depth. +var maxValueDepth = 64 + +// validateSetValue checks that map keys conform to the same format as path sub-keys. +func validateSetValue(v interface{}, depth int) error { + if depth > maxValueDepth { + return fmt.Errorf("value cannot have more than %d nested levels", maxValueDepth) + } + + var nestedVals []interface{} + switch typedVal := v.(type) { + case map[string]interface{}: + for k, v := range typedVal { + if !validSubkey.Match([]byte(k)) { + return fmt.Errorf(`key %q doesn't conform to required format: %s`, k, validSubkey.String()) + } + + nestedVals = append(nestedVals, v) + } + + case []interface{}: + nestedVals = typedVal + } + + for _, v := range nestedVals { + if v == nil { + // the value can be nil (used to unset values for compatibility w/ options) + continue + } + + if err := validateSetValue(v, depth+1); err != nil { + return err + } + } + + return nil +} + // Set sets the named aspect to a specified non-nil value. 
func (a *Aspect) Set(databag DataBag, request string, value interface{}) error { if err := validateAspectDottedPath(request, nil); err != nil { return badRequestErrorFrom(a, "set", request, err.Error()) } + depth := 1 + if err := validateSetValue(value, depth); err != nil { + return badRequestErrorFrom(a, "set", request, err.Error()) + } + if value == nil { return fmt.Errorf("internal error: Set value cannot be nil") } @@ -1331,25 +1380,48 @@ // If the value is nil, the entry is deleted. func (s JSONDataBag) Set(path string, value interface{}) error { subKeys := strings.Split(path, ".") - _, err := set(subKeys, 0, s, value) + + var err error + if value != nil { + _, err = set(subKeys, 0, s, value) + } else { + _, err = unset(subKeys, 0, s) + } + return err } +func removeNilValues(value interface{}) interface{} { + level, ok := value.(map[string]interface{}) + if !ok { + return value + } + + for k, v := range level { + if v == nil { + delete(level, k) + continue + } + + level[k] = removeNilValues(v) + } + + return level +} + func set(subKeys []string, index int, node map[string]json.RawMessage, value interface{}) (json.RawMessage, error) { key := subKeys[index] if index == len(subKeys)-1 { + // remove nil values that may be nested in the value + value = removeNilValues(value) + data, err := json.Marshal(value) if err != nil { return nil, err } node[key] = data - newData, err := json.Marshal(node) - if err != nil { - return nil, err - } - - return newData, nil + return json.Marshal(node) } rawLevel, ok := node[key] @@ -1391,15 +1463,12 @@ matchAll := isPlaceholder(key) if index == len(subKeys)-1 { - if !matchAll { - delete(node, key) - } - - if matchAll || len(node) == 0 { + if matchAll { // remove entire level return nil, nil } + delete(node, key) return json.Marshal(node) } @@ -1441,10 +1510,6 @@ } } - if len(node) == 0 { - return nil, nil - } - return json.Marshal(node) } diff -Nru snapd-2.62+23.10/aspects/aspects_test.go snapd-2.63+23.10/aspects/aspects_test.go --- snapd-2.62+23.10/aspects/aspects_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/aspects/aspects_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -68,15 +68,20 @@ }, { bundle: map[string]interface{}{"bar": map[string]interface{}{"rules": []interface{}{map[string]interface{}{}}}}, - err: `cannot define aspect "bar": aspect rules must have a "request" field`, + err: `cannot define aspect "bar": aspect rules must have a "storage" field`, + }, + + { + bundle: map[string]interface{}{"bar": map[string]interface{}{"rules": []interface{}{map[string]interface{}{"request": "foo", "storage": 1}}}}, + err: `cannot define aspect "bar": "storage" must be a string`, }, { - bundle: map[string]interface{}{"bar": map[string]interface{}{"rules": []interface{}{map[string]interface{}{"request": 1}}}}, + bundle: map[string]interface{}{"bar": map[string]interface{}{"rules": []interface{}{map[string]interface{}{"storage": "foo", "request": 1}}}}, err: `cannot define aspect "bar": "request" must be a string`, }, { - bundle: map[string]interface{}{"bar": map[string]interface{}{"rules": []interface{}{map[string]interface{}{"request": "foo"}}}}, - err: `cannot define aspect "bar": aspect rules must have a "storage" field`, + bundle: map[string]interface{}{"bar": map[string]interface{}{"rules": []interface{}{map[string]interface{}{"storage": "foo", "request": ""}}}}, + err: `cannot define aspect "bar": aspect rules' "request" field must be non-empty, if it exists`, }, { bundle: map[string]interface{}{"bar": map[string]interface{}{"rules": 
[]interface{}{map[string]interface{}{"request": "foo", "storage": 1}}}}, @@ -121,6 +126,33 @@ } } +func (s *aspectSuite) TestMissingRequestDefaultsToStorage(c *C) { + databag := aspects.NewJSONDataBag() + bundle := map[string]interface{}{ + "foo": map[string]interface{}{ + "rules": []interface{}{ + map[string]interface{}{"storage": "a.b"}, + }, + }, + } + bun, err := aspects.NewBundle("acc", "foo", bundle, aspects.NewJSONSchema()) + c.Assert(err, IsNil) + + asp := bun.Aspect("foo") + c.Assert(asp, NotNil) + + err = asp.Set(databag, "a.b", "value") + c.Assert(err, IsNil) + + value, err := asp.Get(databag, "") + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, map[string]interface{}{ + "a": map[string]interface{}{ + "b": "value", + }, + }) +} + func (s *aspectSuite) TestBundleWithSample(c *C) { bundle := map[string]interface{}{ "wifi-setup": map[string]interface{}{ @@ -634,7 +666,7 @@ c.Assert(err, testutil.ErrorIs, &aspects.NotFoundError{}) } -func (s *aspectSuite) TestAspectUnsetLeafUnsetsParent(c *C) { +func (s *aspectSuite) TestAspectUnsetLeafLeavesEmptyParent(c *C) { databag := aspects.NewJSONDataBag() aspectBundle, err := aspects.NewBundle("acc", "foo", map[string]interface{}{ "my-aspect": map[string]interface{}{ @@ -657,8 +689,9 @@ err = aspect.Unset(databag, "bar") c.Assert(err, IsNil) - _, err = aspect.Get(databag, "foo") - c.Assert(err, testutil.ErrorIs, &aspects.NotFoundError{}) + value, err = aspect.Get(databag, "foo") + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, map[string]interface{}{}) } func (s *aspectSuite) TestAspectUnsetAlreadyUnsetEntry(c *C) { @@ -1832,7 +1865,7 @@ "one": "value", "two": "other", }, - // should be completely removed (only has a "one" path) + // the nested value should be removed, leaving an empty map "b": map[string]interface{}{ "one": "value", }, @@ -1852,6 +1885,7 @@ "a": map[string]interface{}{ "two": "other", }, + "b": map[string]interface{}{}, "c": map[string]interface{}{ "two": "value", }, @@ -2232,3 +2266,205 @@ c.Assert(err, IsNil) c.Assert(val, Equals, "value") } + +func (*aspectSuite) TestAspectInvalidMapKeys(c *C) { + bundle, err := aspects.NewBundle("acc", "foo", map[string]interface{}{ + "bar": map[string]interface{}{ + "rules": []interface{}{ + map[string]interface{}{ + "request": "foo", + "storage": "foo", + }, + }, + }, + }, aspects.NewJSONSchema()) + c.Assert(err, IsNil) + + databag := aspects.NewJSONDataBag() + asp := bundle.Aspect("bar") + + type testcase struct { + value interface{} + invalidKey string + } + + tcs := []testcase{ + { + value: map[string]interface{}{"-foo": 2}, + invalidKey: "-foo", + }, + { + value: map[string]interface{}{"foo--bar": 2}, + invalidKey: "foo--bar", + }, + { + value: map[string]interface{}{"foo-": 2}, + invalidKey: "foo-", + }, + { + value: map[string]interface{}{"foo": map[string]interface{}{"-bar": 2}}, + invalidKey: "-bar", + }, + { + value: map[string]interface{}{"foo": map[string]interface{}{"bar": map[string]interface{}{"baz-": 2}}}, + invalidKey: "baz-", + }, + { + value: []interface{}{map[string]interface{}{"foo": 2}, map[string]interface{}{"bar-": 2}}, + invalidKey: "bar-", + }, + { + value: []interface{}{nil, map[string]interface{}{"bar-": 2}}, + invalidKey: "bar-", + }, + { + value: map[string]interface{}{"foo": nil, "bar": map[string]interface{}{"-baz": 2}}, + invalidKey: "-baz", + }, + } + + for _, tc := range tcs { + cmt := Commentf("expected invalid key err for value: %v", tc.value) + err = asp.Set(databag, "foo", tc.value) + c.Assert(err, ErrorMatches, fmt.Sprintf("cannot set 
\"foo\" in aspect acc/foo/bar: key %q doesn't conform to required format: .*", tc.invalidKey), cmt) + } +} + +func (s *aspectSuite) TestSetUsingMapWithNilValuesAtLeaves(c *C) { + databag := aspects.NewJSONDataBag() + aspectBundle, err := aspects.NewBundle("acc", "bundle", map[string]interface{}{ + "foo": map[string]interface{}{ + "rules": []interface{}{ + map[string]interface{}{"request": "foo", "storage": "foo"}, + map[string]interface{}{"request": "foo.a", "storage": "foo.a"}, + map[string]interface{}{"request": "foo.b", "storage": "foo.b"}, + }, + }, + }, aspects.NewJSONSchema()) + c.Assert(err, IsNil) + + asp := aspectBundle.Aspect("foo") + c.Assert(asp, NotNil) + + err = asp.Set(databag, "foo", map[string]interface{}{ + "a": "value", + "b": "other", + }) + c.Assert(err, IsNil) + + err = asp.Set(databag, "foo", map[string]interface{}{ + "a": nil, + "b": nil, + }) + c.Assert(err, IsNil) + + value, err := asp.Get(databag, "foo") + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, map[string]interface{}{}) +} + +func (s *aspectSuite) TestSetWithMultiplePathsNestedAtLeaves(c *C) { + databag := aspects.NewJSONDataBag() + aspectBundle, err := aspects.NewBundle("acc", "bundle", map[string]interface{}{ + "foo": map[string]interface{}{ + "rules": []interface{}{ + map[string]interface{}{"request": "foo.a", "storage": "foo.a"}, + map[string]interface{}{"request": "foo.b", "storage": "foo.b"}, + }, + }, + }, aspects.NewJSONSchema()) + c.Assert(err, IsNil) + + asp := aspectBundle.Aspect("foo") + c.Assert(asp, NotNil) + + err = asp.Set(databag, "foo", map[string]interface{}{ + "a": map[string]interface{}{ + "c": "value", + "d": "other", + }, + "b": "other", + }) + c.Assert(err, IsNil) + + err = asp.Set(databag, "foo", map[string]interface{}{ + "a": map[string]interface{}{ + "d": nil, + }, + "b": nil, + }) + c.Assert(err, IsNil) + + value, err := asp.Get(databag, "foo") + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, map[string]interface{}{ + // consistent with the previous configuration mechanism + "a": map[string]interface{}{}, + }) +} + +func (s *aspectSuite) TestSetWithNilAndNonNilLeaves(c *C) { + databag := aspects.NewJSONDataBag() + aspectBundle, err := aspects.NewBundle("acc", "bundle", map[string]interface{}{ + "foo": map[string]interface{}{ + "rules": []interface{}{ + map[string]interface{}{"request": "foo", "storage": "foo"}, + }, + }, + }, aspects.NewJSONSchema()) + c.Assert(err, IsNil) + + asp := aspectBundle.Aspect("foo") + c.Assert(asp, NotNil) + + err = asp.Set(databag, "foo", map[string]interface{}{ + "a": "value", + "b": "other", + }) + c.Assert(err, IsNil) + + err = asp.Set(databag, "foo", map[string]interface{}{ + "a": nil, + "c": "value", + }) + c.Assert(err, IsNil) + + value, err := asp.Get(databag, "foo") + c.Assert(err, IsNil) + // nil values aren't stored but non-nil values are + c.Assert(value, DeepEquals, map[string]interface{}{ + "c": "value", + }) +} + +func (*aspectSuite) TestSetEnforcesNestednessLimit(c *C) { + restore := aspects.MockMaxValueDepth(2) + defer restore() + + bundle, err := aspects.NewBundle("acc", "foo", map[string]interface{}{ + "bar": map[string]interface{}{ + "rules": []interface{}{ + map[string]interface{}{ + "request": "foo", + "storage": "foo", + }, + }, + }, + }, aspects.NewJSONSchema()) + c.Assert(err, IsNil) + + databag := aspects.NewJSONDataBag() + asp := bundle.Aspect("bar") + + err = asp.Set(databag, "foo", map[string]interface{}{ + "bar": "baz", + }) + c.Assert(err, IsNil) + + err = asp.Set(databag, "foo", map[string]interface{}{ + "bar": 
map[string]interface{}{ + "baz": "value", + }, + }) + c.Assert(err, ErrorMatches, `cannot set "foo" in aspect acc/foo/bar: value cannot have more than 2 nested levels`) +} diff -Nru snapd-2.62+23.10/aspects/export_test.go snapd-2.63+23.10/aspects/export_test.go --- snapd-2.62+23.10/aspects/export_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/aspects/export_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,3 +20,11 @@ package aspects var GetValuesThroughPaths = getValuesThroughPaths + +func MockMaxValueDepth(newDepth int) (restore func()) { + oldDepth := maxValueDepth + maxValueDepth = newDepth + return func() { + maxValueDepth = oldDepth + } +} diff -Nru snapd-2.62+23.10/aspects/schema.go snapd-2.63+23.10/aspects/schema.go --- snapd-2.62+23.10/aspects/schema.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/aspects/schema.go 2024-04-24 00:00:39.000000000 +0000 @@ -509,7 +509,7 @@ func validMapKeys(v map[string]json.RawMessage) error { for k := range v { if !validSubkey.Match([]byte(k)) { - return fmt.Errorf(`key %q doesn't conform to required format`, k) + return fmt.Errorf(`key %q doesn't conform to required format: %s`, k, validSubkey.String()) } } diff -Nru snapd-2.62+23.10/aspects/schema_test.go snapd-2.63+23.10/aspects/schema_test.go --- snapd-2.62+23.10/aspects/schema_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/aspects/schema_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -466,7 +466,7 @@ }`) _, err := aspects.ParseSchema(schemaStr) - c.Assert(err, ErrorMatches, `cannot parse map: key "-foo" doesn't conform to required format`) + c.Assert(err, ErrorMatches, `cannot parse map: key "-foo" doesn't conform to required format: .*`) } func (*schemaSuite) TestMapRejectsInputMapWithInvalidKeyFormat(c *C) { @@ -483,7 +483,7 @@ "-foo": 1 }`) err = schema.Validate(input) - c.Assert(err, ErrorMatches, `cannot accept top level element: key "-foo" doesn't conform to required format`) + c.Assert(err, ErrorMatches, `cannot accept top level element: key "-foo" doesn't conform to required format: .*`) } func (*schemaSuite) TestMapInvalidConstraintCombos(c *C) { diff -Nru snapd-2.62+23.10/asserts/database_test.go snapd-2.63+23.10/asserts/database_test.go --- snapd-2.62+23.10/asserts/database_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/asserts/database_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -25,7 +25,6 @@ "encoding/base64" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -137,7 +136,7 @@ c.Check(info.Mode().Perm(), Equals, os.FileMode(0600)) // secret // too much "clear box" testing? 
ok at least until we have // more functionality - privKey, err := ioutil.ReadFile(keyPath) + privKey, err := os.ReadFile(keyPath) c.Assert(err, IsNil) privKeyFromDisk, err := asserts.DecodePrivateKeyInTest(privKey) diff -Nru snapd-2.62+23.10/asserts/fsentryutils.go snapd-2.63+23.10/asserts/fsentryutils.go --- snapd-2.62+23.10/asserts/fsentryutils.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/asserts/fsentryutils.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "fmt" - "io/ioutil" "os" "path/filepath" @@ -66,7 +65,7 @@ func readEntry(top string, subpath ...string) ([]byte, error) { fpath := filepath.Join(top, filepath.Join(subpath...)) - return ioutil.ReadFile(fpath) + return os.ReadFile(fpath) } func removeEntry(top string, subpath ...string) error { diff -Nru snapd-2.62+23.10/asserts/model.go snapd-2.63+23.10/asserts/model.go --- snapd-2.62+23.10/asserts/model.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/asserts/model.go 2024-04-24 00:00:39.000000000 +0000 @@ -31,6 +31,15 @@ "github.com/snapcore/snapd/strutil" ) +// ModelComponent holds details for components specified by a model assertion. +type ModelComponent struct { + // Presence can be optional or required + Presence string + // Modes is an optional list of modes, which must be a subset + // of the ones for the snap + Modes []string +} + // TODO: for ModelSnap // * consider moving snap.Type out of snap and using it in ModelSnap // but remember assertions use "core" (never "os") for TypeOS @@ -55,6 +64,8 @@ // Classic indicates that this classic snap is intentionally // included in a classic model Classic bool + // Components is a map of component names to ModelComponent + Components map[string]ModelComponent } // SnapName implements naming.SnapRef. @@ -119,7 +130,7 @@ if !ok { return nil, fmt.Errorf(wrongHeaderType) } - modelSnap, err := checkModelSnap(snap, grade, modelIsClassic) + modelSnap, err := checkModelSnap(snap, base, grade, modelIsClassic) if err != nil { return nil, err } @@ -137,54 +148,31 @@ seenIDs[snapID] = modelSnap.Name } - essential := false switch { case modelSnap.SnapType == "snapd": // TODO: allow to be explicit only in grade: dangerous? 
- essential = true if modelSnaps.snapd != nil { return nil, fmt.Errorf("cannot specify multiple snapd snaps: %q and %q", modelSnaps.snapd.Name, modelSnap.Name) } modelSnaps.snapd = modelSnap case modelSnap.SnapType == "kernel": - essential = true if modelSnaps.kernel != nil { return nil, fmt.Errorf("cannot specify multiple kernel snaps: %q and %q", modelSnaps.kernel.Name, modelSnap.Name) } modelSnaps.kernel = modelSnap case modelSnap.SnapType == "gadget": - essential = true if modelSnaps.gadget != nil { return nil, fmt.Errorf("cannot specify multiple gadget snaps: %q and %q", modelSnaps.gadget.Name, modelSnap.Name) } modelSnaps.gadget = modelSnap case modelSnap.Name == base: - essential = true if modelSnap.SnapType != "base" { return nil, fmt.Errorf(`boot base %q must specify type "base", not %q`, base, modelSnap.SnapType) } modelSnaps.base = modelSnap } - if essential { - if len(modelSnap.Modes) != 0 || modelSnap.Presence != "" { - return nil, fmt.Errorf("essential snaps are always available, cannot specify modes or presence for snap %q", modelSnap.Name) - } - modelSnap.Modes = essentialSnapModes - } - - if len(modelSnap.Modes) == 0 { - modelSnap.Modes = defaultModes - } - if modelSnap.Classic && (len(modelSnap.Modes) != 1 || modelSnap.Modes[0] != "run") { - return nil, fmt.Errorf("classic snap %q not allowed outside of run mode: %v", modelSnap.Name, modelSnap.Modes) - } - - if modelSnap.Presence == "" { - modelSnap.Presence = "required" - } - - if !essential { + if !isEssentialSnap(modelSnap.Name, modelSnap.SnapType, base) { modelSnaps.snapsNoEssential = append(modelSnaps.snapsNoEssential, modelSnap) } } @@ -198,7 +186,38 @@ validSnapPresences = []string{"required", "optional"} ) -func checkModelSnap(snap map[string]interface{}, grade ModelGrade, modelIsClassic bool) (*ModelSnap, error) { +func isEssentialSnap(snapName, snapType, modelBase string) bool { + switch snapType { + case "snapd", "kernel", "gadget": + return true + } + if snapName == modelBase { + return true + } + return false +} + +func checkModesForSnap(snap map[string]interface{}, isEssential bool, what string) ([]string, error) { + modes, err := checkStringListInMap(snap, "modes", fmt.Sprintf("%q %s", "modes", what), + validSnapMode) + if err != nil { + return nil, err + } + if isEssential { + if len(modes) != 0 { + return nil, fmt.Errorf("essential snaps are always available, cannot specify modes %s", what) + } + modes = essentialSnapModes + } + + if len(modes) == 0 { + modes = defaultModes + } + + return modes, nil +} + +func checkModelSnap(snap map[string]interface{}, modelBase string, grade ModelGrade, modelIsClassic bool) (*ModelSnap, error) { name, err := checkNotEmptyStringWhat(snap, "name", "of snap") if err != nil { return nil, err @@ -236,7 +255,22 @@ return nil, fmt.Errorf("type of snap %q must be one of %s", name, strings.Join(validSnapTypes, "|")) } - modes, err := checkStringListInMap(snap, "modes", fmt.Sprintf("%q %s", "modes", what), validSnapMode) + presence, err := checkOptionalStringWhat(snap, "presence", what) + if err != nil { + return nil, err + } + if presence != "" && !strutil.ListContains(validSnapPresences, presence) { + return nil, fmt.Errorf("presence of snap %q must be one of required|optional", name) + } + essential := isEssentialSnap(name, typ, modelBase) + if essential && presence != "" { + return nil, fmt.Errorf("essential snaps are always available, cannot specify presence for snap %q", name) + } + if presence == "" { + presence = "required" + } + + modes, err := checkModesForSnap(snap, 
essential, what) if err != nil { return nil, err } @@ -256,14 +290,6 @@ return nil, fmt.Errorf("default channel for snap %q must specify a track", name) } - presence, err := checkOptionalStringWhat(snap, "presence", what) - if err != nil { - return nil, err - } - if presence != "" && !strutil.ListContains(validSnapPresences, presence) { - return nil, fmt.Errorf("presence of snap %q must be one of required|optional", name) - } - isClassic, err := checkOptionalBoolWhat(snap, "classic", what) if err != nil { return nil, err @@ -274,6 +300,15 @@ if isClassic && typ != "app" { return nil, fmt.Errorf("snap %q cannot be classic with type %q instead of app", name, typ) } + if isClassic && (len(modes) != 1 || modes[0] != "run") { + return nil, fmt.Errorf("classic snap %q not allowed outside of run mode: %v", + name, modes) + } + + components, err := checkComponentsForMaps(snap, modes, what) + if err != nil { + return nil, err + } return &ModelSnap{ Name: name, @@ -283,9 +318,96 @@ DefaultChannel: defaultChannel, Presence: presence, // can be empty Classic: isClassic, + Components: components, // can be empty }, nil } +// This is what we expect for components: +/** +snaps: + - name: + ... + presence: "optional"|"required" # optional, defaults to "required" + modes: [] # list of modes + components: # optional + : + presence: "optional"|"required" + modes: [] # list of modes, optional + # must be a subset of snap modes + # defaults to the same modes + # as the snap + : "required"|"optional" # presence, shortcut syntax +**/ +func checkComponentsForMaps(m map[string]interface{}, validModes []string, what string) (map[string]ModelComponent, error) { + const compsField = "components" + value, ok := m[compsField] + if !ok { + return nil, nil + } + comps, ok := value.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("%q %s must be a map from strings to components", + compsField, what) + } + + res := make(map[string]ModelComponent, len(comps)) + for name, comp := range comps { + // Name of component follows the same rules as snap components + if err := naming.ValidateSnap(name); err != nil { + return nil, fmt.Errorf("invalid component name %s", name) + } + + // "comp: required|optional" case + compWhat := fmt.Sprintf("of component %q %s", name, what) + presence, ok := comp.(string) + if ok { + if !strutil.ListContains(validSnapPresences, presence) { + return nil, fmt.Errorf("presence %s must be one of required|optional", compWhat) + } + res[name] = ModelComponent{Presence: presence, + Modes: append([]string(nil), validModes...)} + continue + } + + // try map otherwise + compFields, ok := comp.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("%s must be a map of strings to components or one of required|optional", + compWhat) + } + // Error out if unexpected entry + for key := range compFields { + if !strutil.ListContains([]string{"presence", "modes"}, key) { + return nil, fmt.Errorf("entry %q %s is unknown", key, compWhat) + } + } + presence, err := checkNotEmptyStringWhat(compFields, "presence", compWhat) + if err != nil { + return nil, err + } + if !strutil.ListContains(validSnapPresences, presence) { + return nil, fmt.Errorf("presence %s must be one of required|optional", compWhat) + } + modes, err := checkStringListInMap(compFields, "modes", + fmt.Sprintf("modes %s", compWhat), validSnapMode) + if err != nil { + return nil, err + } + if len(modes) == 0 { + modes = append([]string(nil), validModes...) 
+ } else { + for _, m := range modes { + if !strutil.ListContains(validModes, m) { + return nil, fmt.Errorf("mode %q %s is incompatible with the snap modes", m, compWhat) + } + } + } + res[name] = ModelComponent{Presence: presence, Modes: modes} + } + + return res, nil +} + // unextended case support func checkSnapWithTrack(headers map[string]interface{}, which string) (*ModelSnap, error) { diff -Nru snapd-2.62+23.10/asserts/model_test.go snapd-2.63+23.10/asserts/model_test.go --- snapd-2.62+23.10/asserts/model_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/asserts/model_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -198,6 +198,77 @@ "sign-key-sha3-384: Jv8_JiHiIzJVcO9M55pPdqSDWUvuhfDIBJUS-3VW7F_idjix7Ffn5qMxB21ZQuij" + "\n\n" + "AXNpZw==" + + coreModelWithComponentsExample = `type: model +authority-id: brand-id1 +series: 16 +brand-id: brand-id1 +model: baz-3000 +display-name: Baz 3000 +architecture: amd64 +system-user-authority: * +base: core24 +store: brand-store +snaps: + - + name: baz-linux + id: bazlinuxidididididididididididid + type: kernel + default-channel: 20 + - + name: brand-gadget + id: brandgadgetdidididididididididid + type: gadget + - + name: other-base + id: otherbasedididididididididididid + type: base + presence: required + - + name: nm + id: nmididididididididididididididid + modes: + - ephemeral + - run + default-channel: 1.0 + components: + comp1: + presence: optional + modes: + - ephemeral + comp2: required + - + name: myapp + id: myappdididididididididididididid + type: app + default-channel: 2.0 + presence: optional + modes: + - ephemeral + - run + components: + comp1: + presence: optional + modes: + - ephemeral + - run + comp2: required + - + name: myappopt + id: myappoptidididididididididididid + type: app + presence: required + components: + comp1: + presence: optional + comp2: required +OTHERgrade: secured +storage-safety: encrypted +` + "TSLINE" + + "body-length: 0\n" + + "sign-key-sha3-384: Jv8_JiHiIzJVcO9M55pPdqSDWUvuhfDIBJUS-3VW7F_idjix7Ffn5qMxB21ZQuij" + + "\n\n" + + "AXNpZw==" ) func (mods *modelSuite) TestDecodeOK(c *C) { @@ -1037,10 +1108,10 @@ {"OTHER", " -\n name: myapp2\n id: myappdididididididididididididid\n", `cannot specify the same snap id "myappdididididididididididididid" multiple times, specified for snaps "myapp" and "myapp2"`}, {"OTHER", " -\n name: kernel2\n id: kernel2didididididididididididid\n type: kernel\n", `cannot specify multiple kernel snaps: "baz-linux" and "kernel2"`}, {"OTHER", " -\n name: gadget2\n id: gadget2didididididididididididid\n type: gadget\n", `cannot specify multiple gadget snaps: "brand-gadget" and "gadget2"`}, - {"type: gadget\n", "type: gadget\n presence: required\n", `essential snaps are always available, cannot specify modes or presence for snap "brand-gadget"`}, - {"type: gadget\n", "type: gadget\n modes:\n - run\n", `essential snaps are always available, cannot specify modes or presence for snap "brand-gadget"`}, - {"type: kernel\n", "type: kernel\n presence: required\n", `essential snaps are always available, cannot specify modes or presence for snap "baz-linux"`}, - {"OTHER", " -\n name: core20\n id: core20ididididididididididididid\n type: base\n presence: optional\n", `essential snaps are always available, cannot specify modes or presence for snap "core20"`}, + {"type: gadget\n", "type: gadget\n presence: required\n", `essential snaps are always available, cannot specify presence for snap "brand-gadget"`}, + {"type: gadget\n", "type: gadget\n modes:\n - run\n", `essential snaps are 
always available, cannot specify modes of snap "brand-gadget"`}, + {"type: kernel\n", "type: kernel\n presence: required\n", `essential snaps are always available, cannot specify presence for snap "baz-linux"`}, + {"OTHER", " -\n name: core20\n id: core20ididididididididididididid\n type: base\n presence: optional\n", `essential snaps are always available, cannot specify presence for snap "core20"`}, {"OTHER", " -\n name: core20\n id: core20ididididididididididididid\n type: app\n", `boot base "core20" must specify type "base", not "app"`}, {"OTHER", "kernel: foo\n", `cannot specify separate "kernel" header once using the extended snaps header`}, {"OTHER", "gadget: foo\n", `cannot specify separate "gadget" header once using the extended snaps header`}, @@ -1433,3 +1504,230 @@ c.Check(model.AllSnaps(), DeepEquals, allSnaps) } + +func (mods *modelSuite) TestDecodeWithComponentsOK(c *C) { + encoded := strings.Replace(coreModelWithComponentsExample, "TSLINE", mods.tsLine, 1) + encoded = strings.Replace(encoded, "OTHER", "", 1) + a, err := asserts.Decode([]byte(encoded)) + c.Assert(err, IsNil) + + c.Check(a.Type(), Equals, asserts.ModelType) + model := a.(*asserts.Model) + c.Check(model.AuthorityID(), Equals, "brand-id1") + c.Check(model.Timestamp(), Equals, mods.ts) + c.Check(model.Series(), Equals, "16") + c.Check(model.BrandID(), Equals, "brand-id1") + c.Check(model.Model(), Equals, "baz-3000") + c.Check(model.DisplayName(), Equals, "Baz 3000") + c.Check(model.Architecture(), Equals, "amd64") + c.Check(model.GadgetSnap(), DeepEquals, &asserts.ModelSnap{ + Name: "brand-gadget", + SnapID: "brandgadgetdidididididididididid", + SnapType: "gadget", + Modes: []string{"run", "ephemeral"}, + Presence: "required", + DefaultChannel: "latest/stable", + }) + c.Check(model.Gadget(), Equals, "brand-gadget") + c.Check(model.GadgetTrack(), Equals, "") + c.Check(model.KernelSnap(), DeepEquals, &asserts.ModelSnap{ + Name: "baz-linux", + SnapID: "bazlinuxidididididididididididid", + SnapType: "kernel", + Modes: []string{"run", "ephemeral"}, + Presence: "required", + DefaultChannel: "20", + }) + c.Check(model.Kernel(), Equals, "baz-linux") + c.Check(model.KernelTrack(), Equals, "") + c.Check(model.Base(), Equals, "core24") + c.Check(model.BaseSnap(), DeepEquals, &asserts.ModelSnap{ + Name: "core24", + SnapID: "dwTAh7MZZ01zyriOZErqd1JynQLiOGvM", + SnapType: "base", + Modes: []string{"run", "ephemeral"}, + Presence: "required", + DefaultChannel: "latest/stable", + }) + c.Check(model.Store(), Equals, "brand-store") + c.Check(model.Grade(), Equals, asserts.ModelSecured) + c.Check(model.StorageSafety(), Equals, asserts.StorageSafetyEncrypted) + essentialSnaps := model.EssentialSnaps() + c.Check(essentialSnaps, DeepEquals, []*asserts.ModelSnap{ + model.KernelSnap(), + model.BaseSnap(), + model.GadgetSnap(), + }) + snaps := model.SnapsWithoutEssential() + c.Check(snaps, DeepEquals, []*asserts.ModelSnap{ + { + Name: "other-base", + SnapID: "otherbasedididididididididididid", + SnapType: "base", + Modes: []string{"run"}, + DefaultChannel: "latest/stable", + Presence: "required", + }, + { + Name: "nm", + SnapID: "nmididididididididididididididid", + SnapType: "app", + Modes: []string{"ephemeral", "run"}, + DefaultChannel: "1.0", + Presence: "required", + Components: map[string]asserts.ModelComponent{ + "comp1": { + Presence: "optional", + Modes: []string{"ephemeral"}, + }, + "comp2": { + Presence: "required", + Modes: []string{"ephemeral", "run"}, + }, + }, + }, + { + Name: "myapp", + SnapID: 
"myappdididididididididididididid", + SnapType: "app", + Modes: []string{"ephemeral", "run"}, + DefaultChannel: "2.0", + Presence: "optional", + Components: map[string]asserts.ModelComponent{ + "comp1": { + Presence: "optional", + Modes: []string{"ephemeral", "run"}, + }, + "comp2": { + Presence: "required", + Modes: []string{"ephemeral", "run"}, + }, + }, + }, + { + Name: "myappopt", + SnapID: "myappoptidididididididididididid", + SnapType: "app", + DefaultChannel: "latest/stable", + Presence: "required", + Modes: []string{"run"}, + Components: map[string]asserts.ModelComponent{ + "comp1": { + Presence: "optional", + Modes: []string{"run"}, + }, + "comp2": { + Presence: "required", + Modes: []string{"run"}, + }, + }, + }, + }) + + c.Check(model.SystemUserAuthority(), HasLen, 0) + c.Check(model.SerialAuthority(), DeepEquals, []string{"brand-id1"}) + c.Check(model.PreseedAuthority(), DeepEquals, []string{"brand-id1"}) +} + +func (mods *modelSuite) TestDecodeWithComponentsBadPresence1(c *C) { + encoded := strings.Replace(coreModelWithComponentsExample, "TSLINE", mods.tsLine, 1) + encoded = strings.Replace(encoded, "OTHER", ` - + name: somesnap + id: somesnapidididididididididididid + type: app + presence: required + components: + comp1: badpresenceval +`, 1) + a, err := asserts.Decode([]byte(encoded)) + c.Assert(err.Error(), Equals, `assertion model: presence of component "comp1" of snap "somesnap" must be one of required|optional`) + c.Assert(a, IsNil) +} + +func (mods *modelSuite) TestDecodeWithComponentsBadPresence2(c *C) { + encoded := strings.Replace(coreModelWithComponentsExample, "TSLINE", mods.tsLine, 1) + encoded = strings.Replace(encoded, "OTHER", ` - + name: somesnap + id: somesnapidididididididididididid + type: app + presence: required + components: + comp1: + presence: badpresenceval +`, 1) + a, err := asserts.Decode([]byte(encoded)) + c.Assert(err.Error(), Equals, `assertion model: presence of component "comp1" of snap "somesnap" must be one of required|optional`) + c.Assert(a, IsNil) +} + +func (mods *modelSuite) TestDecodeWithComponentsBadMode(c *C) { + encoded := strings.Replace(coreModelWithComponentsExample, "TSLINE", mods.tsLine, 1) + encoded = strings.Replace(encoded, "OTHER", ` - + name: somesnap + id: somesnapidididididididididididid + type: app + presence: required + modes: + - run + components: + comp1: + presence: required + modes: + - ephemeral +`, 1) + a, err := asserts.Decode([]byte(encoded)) + c.Assert(err.Error(), Equals, `assertion model: mode "ephemeral" of component "comp1" of snap "somesnap" is incompatible with the snap modes`) + c.Assert(a, IsNil) +} + +func (mods *modelSuite) TestDecodeWithComponentsBadContent(c *C) { + for i, tc := range []struct { + compsEntry string + errMsg string + }{ + {` components: + - comp1 + - comp2 +`, + `assertion model: "components" of snap "somesnap" must be a map from strings to components`}, + {` components: + comp_1: required +`, + `parsing assertion headers: invalid map entry key: "comp_1"`}, + {` components: + comp1: + presence: required + other: something +`, + `assertion model: entry "other" of component "comp1" of snap "somesnap" is unknown`}, + {` components: + comp1: + modes: + - run +`, + `assertion model: "presence" of component "comp1" of snap "somesnap" is mandatory`, + }, + {` components: + comp1: + presence: required + modes: + - foomode +`, + `assertion model: mode "foomode" of component "comp1" of snap "somesnap" is incompatible with the snap modes`, + }, + } { + c.Logf("test %d: %q", i, tc.compsEntry) + 
encoded := strings.Replace(coreModelWithComponentsExample, "TSLINE", mods.tsLine, 1) + encoded = strings.Replace(encoded, "OTHER", ` - + name: somesnap + id: somesnapidididididididididididid + type: app + presence: required + modes: + - run +`+tc.compsEntry, 1) + a, err := asserts.Decode([]byte(encoded)) + c.Assert(err.Error(), Equals, tc.errMsg) + c.Assert(a, IsNil) + } +} diff -Nru snapd-2.62+23.10/boot/assets.go snapd-2.63+23.10/boot/assets.go --- snapd-2.62+23.10/boot/assets.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/boot/assets.go 2024-04-24 00:00:39.000000000 +0000 @@ -616,6 +616,9 @@ // content return gadget.ChangeAbort, fmt.Errorf("cannot reuse asset name %q", ta.name) } + // The order of assets is important. Changing it would + // change assumptions in + // bootAssetsToLoadChains (*trustedAssets)[ta.name] = append((*trustedAssets)[ta.name], ta.hash) } diff -Nru snapd-2.62+23.10/boot/assets_test.go snapd-2.63+23.10/boot/assets_test.go --- snapd-2.62+23.10/boot/assets_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/boot/assets_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -2554,22 +2554,22 @@ case 1: c.Check(mp.EFILoadChains, DeepEquals, []*secboot.LoadChain{ secboot.NewLoadChain(shimBf, - secboot.NewLoadChain(assetBf, - secboot.NewLoadChain(recoveryKernelBf)), secboot.NewLoadChain(beforeAssetBf, + secboot.NewLoadChain(recoveryKernelBf)), + secboot.NewLoadChain(assetBf, secboot.NewLoadChain(recoveryKernelBf))), secboot.NewLoadChain(shimBf, - secboot.NewLoadChain(assetBf, - secboot.NewLoadChain(runKernelBf)), secboot.NewLoadChain(beforeAssetBf, + secboot.NewLoadChain(runKernelBf)), + secboot.NewLoadChain(assetBf, secboot.NewLoadChain(runKernelBf))), }) case 2: c.Check(mp.EFILoadChains, DeepEquals, []*secboot.LoadChain{ secboot.NewLoadChain(shimBf, - secboot.NewLoadChain(assetBf, - secboot.NewLoadChain(recoveryKernelBf)), secboot.NewLoadChain(beforeAssetBf, + secboot.NewLoadChain(recoveryKernelBf)), + secboot.NewLoadChain(assetBf, secboot.NewLoadChain(recoveryKernelBf))), }) default: diff -Nru snapd-2.62+23.10/boot/bootchain.go snapd-2.63+23.10/boot/bootchain.go --- snapd-2.62+23.10/boot/bootchain.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/boot/bootchain.go 2024-04-24 00:00:39.000000000 +0000 @@ -105,30 +105,15 @@ return false } -func toPredictableBootAsset(b *bootAsset) *bootAsset { - if b == nil { - return nil - } - newB := *b - if b.Hashes != nil { - newB.Hashes = make([]string, len(b.Hashes)) - copy(newB.Hashes, b.Hashes) - sort.Strings(newB.Hashes) - } - return &newB -} - func toPredictableBootChain(b *bootChain) *bootChain { if b == nil { return nil } newB := *b - if b.AssetChain != nil { - newB.AssetChain = make([]bootAsset, len(b.AssetChain)) - for i := range b.AssetChain { - newB.AssetChain[i] = *toPredictableBootAsset(&b.AssetChain[i]) - } - } + // AssetChain is sorted list (by boot order) of sorted list (old to new asset). + // So it is already predictable and we can keep it the way it is. + + // However we still need to sort kernel KernelCmdlines if b.KernelCmdlines != nil { newB.KernelCmdlines = make([]string, len(b.KernelCmdlines)) copy(newB.KernelCmdlines, b.KernelCmdlines) @@ -257,7 +242,13 @@ // bootAssetsToLoadChains generates a list of load chains covering given boot // assets sequence. At the end of each chain, adds an entry for the kernel boot // file. 
-func bootAssetsToLoadChains(assets []bootAsset, kernelBootFile bootloader.BootFile, roleToBlName map[bootloader.Role]string) ([]*secboot.LoadChain, error) { +// We do not calculate some boot chains because they are impossible as +// when we update assets we write first the binaries that are used +// later, that is, if updating both shim and grub, the new grub is +// copied first to the disk, so booting from the new shim to the old +// grub is not possible. This is controlled by expectNew, that tells +// us that the previous step in the chain is from a new asset. +func bootAssetsToLoadChains(assets []bootAsset, kernelBootFile bootloader.BootFile, roleToBlName map[bootloader.Role]string, expectNew bool) ([]*secboot.LoadChain, error) { // kernel is added after all the assets addKernelBootFile := len(assets) == 0 if addKernelBootFile { @@ -270,7 +261,24 @@ return nil, fmt.Errorf("internal error: no bootloader name for boot asset role %q", thisAsset.Role) } var chains []*secboot.LoadChain - for _, hash := range thisAsset.Hashes { + + for i, hash := range thisAsset.Hashes { + // There should be 1 or 2 assets, and their position has a meaning. + // See TrustedAssetsUpdateObserver.observeUpdate + if i == 0 { + // i == 0 means currently installed asset. + // We do not expect this asset to be used as + // we have new assets earlier in the chain + if len(thisAsset.Hashes) == 2 && expectNew { + continue + } + } else if i == 1 { + // i == 1 means new asset + } else { + // If there is a second asset, it is the next asset to be installed + return nil, fmt.Errorf("internal error: did not expect more than 2 hashes for %s", thisAsset.Name) + } + var bf bootloader.BootFile var next []*secboot.LoadChain var err error @@ -286,7 +294,7 @@ p, thisAsset.Role, ) - next, err = bootAssetsToLoadChains(assets[1:], kernelBootFile, roleToBlName) + next, err = bootAssetsToLoadChains(assets[1:], kernelBootFile, roleToBlName, expectNew || i == 1) if err != nil { return nil, err } diff -Nru snapd-2.62+23.10/boot/bootchain_test.go snapd-2.63+23.10/boot/bootchain_test.go --- snapd-2.62+23.10/boot/bootchain_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/boot/bootchain_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -68,28 +68,6 @@ } } -func (s *bootchainSuite) TestBootAssetsPredictable(c *C) { - // by role - ba := boot.BootAsset{ - Role: bootloader.RoleRunMode, Name: "list", Hashes: []string{"b", "a"}, - } - pred := boot.ToPredictableBootAsset(&ba) - c.Check(pred, DeepEquals, &boot.BootAsset{ - Role: bootloader.RoleRunMode, Name: "list", Hashes: []string{"a", "b"}, - }) - // original structure is not changed - c.Check(ba, DeepEquals, boot.BootAsset{ - Role: bootloader.RoleRunMode, Name: "list", Hashes: []string{"b", "a"}, - }) - - // try to make a predictable struct predictable once more - predAgain := boot.ToPredictableBootAsset(pred) - c.Check(predAgain, DeepEquals, pred) - - baNil := boot.ToPredictableBootAsset(nil) - c.Check(baNil, IsNil) -} - func (s *bootchainSuite) TestBootChainMarshalOnlyAssets(c *C) { pbNil := boot.ToPredictableBootChain(nil) c.Check(pbNil, IsNil) @@ -111,10 +89,10 @@ AssetChain: []boot.BootAsset{ // hash lists are sorted {Role: bootloader.RoleRecovery, Name: "shim", Hashes: []string{"b"}}, - {Role: bootloader.RoleRecovery, Name: "loader", Hashes: []string{"d", "e"}}, - {Role: bootloader.RoleRunMode, Name: "loader", Hashes: []string{"c", "d"}}, - {Role: bootloader.RoleRunMode, Name: "1oader", Hashes: []string{"d", "e"}}, - {Role: bootloader.RoleRunMode, Name: "0oader", Hashes: 
[]string{"x", "z"}}, + {Role: bootloader.RoleRecovery, Name: "loader", Hashes: []string{"e", "d"}}, + {Role: bootloader.RoleRunMode, Name: "loader", Hashes: []string{"d", "c"}}, + {Role: bootloader.RoleRunMode, Name: "1oader", Hashes: []string{"e", "d"}}, + {Role: bootloader.RoleRunMode, Name: "0oader", Hashes: []string{"z", "x"}}, }, }) @@ -162,7 +140,7 @@ // assets are not reordered AssetChain: []boot.BootAsset{ // hash lists are sorted - {Role: bootloader.RoleRecovery, Name: "shim", Hashes: []string{"a", "b"}}, + {Role: bootloader.RoleRecovery, Name: "shim", Hashes: []string{"b", "a"}}, {Role: bootloader.RoleRecovery, Name: "loader", Hashes: []string{"d"}}, {Role: bootloader.RoleRunMode, Name: "loader", Hashes: []string{"c", "d"}}, }, @@ -178,7 +156,7 @@ d, err := json.Marshal(predictableBc) c.Assert(err, IsNil) - c.Check(string(d), Equals, `{"brand-id":"mybrand","model":"foo","grade":"dangerous","model-sign-key-id":"my-key-id","asset-chain":[{"role":"recovery","name":"shim","hashes":["a","b"]},{"role":"recovery","name":"loader","hashes":["d"]},{"role":"run-mode","name":"loader","hashes":["c","d"]}],"kernel":"pc-kernel","kernel-revision":"1234","kernel-cmdlines":["a=1","foo=bar baz=0x123"]}`) + c.Check(string(d), Equals, `{"brand-id":"mybrand","model":"foo","grade":"dangerous","model-sign-key-id":"my-key-id","asset-chain":[{"role":"recovery","name":"shim","hashes":["b","a"]},{"role":"recovery","name":"loader","hashes":["d"]},{"role":"run-mode","name":"loader","hashes":["c","d"]}],"kernel":"pc-kernel","kernel-revision":"1234","kernel-cmdlines":["a=1","foo=bar baz=0x123"]}`) expectedOriginal := &boot.BootChain{ BrandID: "mybrand", Model: "foo", @@ -356,7 +334,7 @@ `snapd_recovery_mode=recover snapd_recovery_system=23 foo`, }, "asset-chain": []interface{}{ - map[string]interface{}{"role": "recovery", "name": "shim", "hashes": []interface{}{"x", "y"}}, + map[string]interface{}{"role": "recovery", "name": "shim", "hashes": []interface{}{"y", "x"}}, map[string]interface{}{"role": "recovery", "name": "loader", "hashes": []interface{}{"c", "d"}}, }, }, { @@ -368,9 +346,9 @@ "kernel-revision": "1234", "kernel-cmdlines": []interface{}{"snapd_recovery_mode=run foo"}, "asset-chain": []interface{}{ - map[string]interface{}{"role": "recovery", "name": "shim", "hashes": []interface{}{"x", "y"}}, + map[string]interface{}{"role": "recovery", "name": "shim", "hashes": []interface{}{"y", "x"}}, map[string]interface{}{"role": "recovery", "name": "loader", "hashes": []interface{}{"c", "d"}}, - map[string]interface{}{"role": "run-mode", "name": "loader", "hashes": []interface{}{"a", "b"}}, + map[string]interface{}{"role": "run-mode", "name": "loader", "hashes": []interface{}{"b", "a"}}, }, }, { "model": "foo", @@ -383,7 +361,7 @@ "asset-chain": []interface{}{ map[string]interface{}{"role": "recovery", "name": "shim", "hashes": []interface{}{"x", "y"}}, map[string]interface{}{"role": "recovery", "name": "loader", "hashes": []interface{}{"c", "d"}}, - map[string]interface{}{"role": "run-mode", "name": "loader", "hashes": []interface{}{"x", "z"}}, + map[string]interface{}{"role": "run-mode", "name": "loader", "hashes": []interface{}{"z", "x"}}, }, }, }) @@ -617,7 +595,7 @@ Grade: "dangerous", ModelSignKeyID: "key-1", AssetChain: []boot.BootAsset{ - {Hashes: []string{"a", "b"}}, + {Hashes: []string{"b", "a"}}, }, Kernel: "foo", KernelCmdlines: []string{`panic=1`}, @@ -643,7 +621,7 @@ }, }, { AssetChain: []boot.BootAsset{ - {Hashes: []string{"a", "b"}}, + {Hashes: []string{"b", "a"}}, {Hashes: []string{"c", 
"d"}}, }, }, @@ -994,7 +972,7 @@ func (s *bootchainSuite) TestBootAssetsToLoadChainTrivialKernel(c *C) { kbl := bootloader.NewBootFile("pc-kernel", "kernel.efi", bootloader.RoleRunMode) - chains, err := boot.BootAssetsToLoadChains(nil, kbl, nil) + chains, err := boot.BootAssetsToLoadChains(nil, kbl, nil, false) c.Assert(err, IsNil) c.Check(chains, DeepEquals, []*secboot.LoadChain{ @@ -1016,7 +994,7 @@ // missing bootloader name for role "run-mode" } // fails when probing the shim asset in the cache - chains, err := boot.BootAssetsToLoadChains(assets, kbl, blNames) + chains, err := boot.BootAssetsToLoadChains(assets, kbl, blNames, false) c.Assert(err, ErrorMatches, "file .*/recovery-bl/shim-hash0 not found in boot assets cache") c.Check(chains, IsNil) // make it work now @@ -1024,7 +1002,7 @@ c.Assert(os.WriteFile(cPath("recovery-bl/shim-hash0"), nil, 0644), IsNil) // nested error bubbled up - chains, err = boot.BootAssetsToLoadChains(assets, kbl, blNames) + chains, err = boot.BootAssetsToLoadChains(assets, kbl, blNames, false) c.Assert(err, ErrorMatches, "file .*/recovery-bl/loader-recovery-hash0 not found in boot assets cache") c.Check(chains, IsNil) // again, make it work @@ -1032,7 +1010,7 @@ c.Assert(os.WriteFile(cPath("recovery-bl/loader-recovery-hash0"), nil, 0644), IsNil) // fails on missing bootloader name for role "run-mode" - chains, err = boot.BootAssetsToLoadChains(assets, kbl, blNames) + chains, err = boot.BootAssetsToLoadChains(assets, kbl, blNames, false) c.Assert(err, ErrorMatches, `internal error: no bootloader name for boot asset role "run-mode"`) c.Check(chains, IsNil) } @@ -1062,7 +1040,7 @@ bootloader.RoleRunMode: "run-bl", } - chains, err := boot.BootAssetsToLoadChains(assets, kbl, blNames) + chains, err := boot.BootAssetsToLoadChains(assets, kbl, blNames, false) c.Assert(err, IsNil) c.Logf("got:") @@ -1104,7 +1082,7 @@ bootloader.RoleRecovery: "recovery-bl", bootloader.RoleRunMode: "run-bl", } - chains, err := boot.BootAssetsToLoadChains(assets, kbl, blNames) + chains, err := boot.BootAssetsToLoadChains(assets, kbl, blNames, false) c.Assert(err, IsNil) c.Logf("got:") @@ -1120,19 +1098,10 @@ secboot.NewLoadChain(nbf("", cPath("run-bl/loader-run-hash1"), bootloader.RoleRunMode), secboot.NewLoadChain(nbf("pc-kernel", "kernel.efi", bootloader.RoleRunMode)))), secboot.NewLoadChain(nbf("", cPath("recovery-bl/loader-recovery-hash1"), bootloader.RoleRecovery), - secboot.NewLoadChain(nbf("", cPath("run-bl/loader-run-hash0"), bootloader.RoleRunMode), - secboot.NewLoadChain(nbf("pc-kernel", "kernel.efi", bootloader.RoleRunMode))), secboot.NewLoadChain(nbf("", cPath("run-bl/loader-run-hash1"), bootloader.RoleRunMode), secboot.NewLoadChain(nbf("pc-kernel", "kernel.efi", bootloader.RoleRunMode))))), secboot.NewLoadChain(nbf("", cPath("recovery-bl/shim-hash1"), bootloader.RoleRecovery), - secboot.NewLoadChain(nbf("", cPath("recovery-bl/loader-recovery-hash0"), bootloader.RoleRecovery), - secboot.NewLoadChain(nbf("", cPath("run-bl/loader-run-hash0"), bootloader.RoleRunMode), - secboot.NewLoadChain(nbf("pc-kernel", "kernel.efi", bootloader.RoleRunMode))), - secboot.NewLoadChain(nbf("", cPath("run-bl/loader-run-hash1"), bootloader.RoleRunMode), - secboot.NewLoadChain(nbf("pc-kernel", "kernel.efi", bootloader.RoleRunMode)))), secboot.NewLoadChain(nbf("", cPath("recovery-bl/loader-recovery-hash1"), bootloader.RoleRecovery), - secboot.NewLoadChain(nbf("", cPath("run-bl/loader-run-hash0"), bootloader.RoleRunMode), - secboot.NewLoadChain(nbf("pc-kernel", "kernel.efi", 
bootloader.RoleRunMode))), secboot.NewLoadChain(nbf("", cPath("run-bl/loader-run-hash1"), bootloader.RoleRunMode), secboot.NewLoadChain(nbf("pc-kernel", "kernel.efi", bootloader.RoleRunMode))))), } @@ -1179,7 +1148,7 @@ rootdir := c.MkDir() - expected := `{"reseal-count":0,"boot-chains":[{"brand-id":"mybrand","model":"foo","grade":"dangerous","model-sign-key-id":"my-key-id","asset-chain":[{"role":"recovery","name":"shim","hashes":["x","y"]},{"role":"recovery","name":"loader","hashes":["c","d"]}],"kernel":"pc-kernel-recovery","kernel-revision":"1234","kernel-cmdlines":["snapd_recovery_mode=recover foo"]},{"brand-id":"mybrand","model":"foo","grade":"signed","model-sign-key-id":"my-key-id","asset-chain":[{"role":"recovery","name":"shim","hashes":["x","y"]},{"role":"recovery","name":"loader","hashes":["c","d"]},{"role":"run-mode","name":"loader","hashes":["x","z"]}],"kernel":"pc-kernel-other","kernel-revision":"2345","kernel-cmdlines":["snapd_recovery_mode=run foo"]}]} + expected := `{"reseal-count":0,"boot-chains":[{"brand-id":"mybrand","model":"foo","grade":"dangerous","model-sign-key-id":"my-key-id","asset-chain":[{"role":"recovery","name":"shim","hashes":["y","x"]},{"role":"recovery","name":"loader","hashes":["c","d"]}],"kernel":"pc-kernel-recovery","kernel-revision":"1234","kernel-cmdlines":["snapd_recovery_mode=recover foo"]},{"brand-id":"mybrand","model":"foo","grade":"signed","model-sign-key-id":"my-key-id","asset-chain":[{"role":"recovery","name":"shim","hashes":["x","y"]},{"role":"recovery","name":"loader","hashes":["c","d"]},{"role":"run-mode","name":"loader","hashes":["z","x"]}],"kernel":"pc-kernel-other","kernel-revision":"2345","kernel-cmdlines":["snapd_recovery_mode=run foo"]}]} ` // creates a complete tree and writes a file err := boot.WriteBootChains(pbc, filepath.Join(dirs.SnapFDEDirUnder(rootdir), "boot-chains"), 0) diff -Nru snapd-2.62+23.10/boot/export_test.go snapd-2.63+23.10/boot/export_test.go --- snapd-2.62+23.10/boot/export_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/boot/export_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -176,7 +176,6 @@ ) var ( - ToPredictableBootAsset = toPredictableBootAsset ToPredictableBootChain = toPredictableBootChain ToPredictableBootChains = toPredictableBootChains PredictableBootChainsEqualForReseal = predictableBootChainsEqualForReseal diff -Nru snapd-2.62+23.10/boot/flags.go snapd-2.63+23.10/boot/flags.go --- snapd-2.62+23.10/boot/flags.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/boot/flags.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -224,7 +223,7 @@ // bootenv are for this boot or the next one, but the initramfs will always // copy the flags that were set into /run, so we always know the current // boot's flags are written in /run - b, err := ioutil.ReadFile(snapBootFlagsFile) + b, err := os.ReadFile(snapBootFlagsFile) if err != nil { return nil, err } @@ -314,7 +313,7 @@ // host mount is snap-bootstrap's /run/snapd/snap-bootstrap/degraded.json, so // we have to go parse that degradedJSONFile := filepath.Join(dirs.SnapBootstrapRunDir, "degraded.json") - b, err := ioutil.ReadFile(degradedJSONFile) + b, err := os.ReadFile(degradedJSONFile) if err != nil { return nil, err } diff -Nru snapd-2.62+23.10/boot/modeenv_test.go snapd-2.63+23.10/boot/modeenv_test.go --- snapd-2.62+23.10/boot/modeenv_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/boot/modeenv_test.go 2024-04-24 00:00:39.000000000 +0000 
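The test expectations above shrink because load chains that would boot an old grub from a new shim are no longer generated. As a rough illustration of the rule only (not the snapd implementation; the type and function names below are invented), the recursion skips the currently-installed hash of a two-hash asset whenever an earlier link in the chain already came from a newly written asset:

package main

import "fmt"

type asset struct {
	name   string
	hashes []string // one or two entries: [current] or [current, new]
}

// chains lists the asset/hash sequences that remain bootable.
func chains(assets []asset, expectNew bool) [][]string {
	if len(assets) == 0 {
		return [][]string{{}}
	}
	var out [][]string
	for i, h := range assets[0].hashes {
		// the currently installed hash (i == 0) is unreachable once an
		// earlier link in the chain already used a newly written asset
		if i == 0 && len(assets[0].hashes) == 2 && expectNew {
			continue
		}
		for _, rest := range chains(assets[1:], expectNew || i == 1) {
			out = append(out, append([]string{assets[0].name + "-" + h}, rest...))
		}
	}
	return out
}

func main() {
	bootAssets := []asset{
		{name: "shim", hashes: []string{"old", "new"}},
		{name: "grub", hashes: []string{"old", "new"}},
	}
	for _, c := range chains(bootAssets, false) {
		fmt.Println(c)
	}
}

Running this prints three chains (shim-old→grub-old, shim-old→grub-new, shim-new→grub-new) while shim-new→grub-old is pruned, matching the chains dropped from the expected test results above.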
@@ -23,7 +23,6 @@ "bytes" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -209,7 +208,7 @@ err = dupDiskModeenv.Write() c.Assert(err, IsNil) c.Assert(dirs.SnapModeenvFileUnder(s.tmpdir), testutil.FilePresent) - origBytes, err := ioutil.ReadFile(dirs.SnapModeenvFileUnder(s.tmpdir) + ".orig") + origBytes, err := os.ReadFile(dirs.SnapModeenvFileUnder(s.tmpdir) + ".orig") c.Assert(err, IsNil) // the files should be the same c.Assert(dirs.SnapModeenvFileUnder(s.tmpdir), testutil.FileEquals, string(origBytes)) diff -Nru snapd-2.62+23.10/boot/seal.go snapd-2.63+23.10/boot/seal.go --- snapd-2.62+23.10/boot/seal.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/boot/seal.go 2024-04-24 00:00:39.000000000 +0000 @@ -913,7 +913,8 @@ for _, bc := range pbc { modelForSealing := bc.modelForSealing() modelID := modelUniqueID(modelForSealing) - loadChains, err := bootAssetsToLoadChains(bc.AssetChain, bc.kernelBootFile, roleToBlName) + const expectNew = false + loadChains, err := bootAssetsToLoadChains(bc.AssetChain, bc.kernelBootFile, roleToBlName, expectNew) if err != nil { return nil, fmt.Errorf("cannot build load chains with current boot assets: %s", err) } diff -Nru snapd-2.62+23.10/boot/seal_test.go snapd-2.63+23.10/boot/seal_test.go --- snapd-2.62+23.10/boot/seal_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/boot/seal_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -717,17 +717,6 @@ runGrub2, possibleRunKernel, }) - if shimId2 == shimId && grubId2 == grubId { - // due to bugs in ordering, sometimes grub2 is old and grub is new, so we need to test - // the case shim2 -> grub -> runGrub - // this happens only when we do not change the id of the asset (that is the paths are not changed) - possibleChains = append(possibleChains, []bootloader.BootFile{ - shim2, - grub, - runGrub, - possibleRunKernel, - }) - } } } else if shimId2 != "" { // We should not test the case where we half update, to a completely new bootchain. diff -Nru snapd-2.62+23.10/bootloader/assets/genasset/main_test.go snapd-2.63+23.10/bootloader/assets/genasset/main_test.go --- snapd-2.62+23.10/bootloader/assets/genasset/main_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/bootloader/assets/genasset/main_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -84,7 +83,7 @@ c.Assert(err, IsNil) err = generate.Run("asset-name", filepath.Join(d, "in"), filepath.Join(d, "out")) c.Assert(err, IsNil) - data, err := ioutil.ReadFile(filepath.Join(d, "out")) + data, err := os.ReadFile(filepath.Join(d, "out")) c.Assert(err, IsNil) const exp = `// -*- Mode: Go; indent-tabs-mode: t -*- diff -Nru snapd-2.62+23.10/bootloader/assets/grub_test.go snapd-2.63+23.10/bootloader/assets/grub_test.go --- snapd-2.62+23.10/bootloader/assets/grub_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/bootloader/assets/grub_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "bytes" "fmt" - "io/ioutil" + "os" "testing" . 
"gopkg.in/check.v1" @@ -160,7 +160,7 @@ } { assetData := assets.Internal(tc.asset) c.Assert(assetData, NotNil) - data, err := ioutil.ReadFile(tc.file) + data, err := os.ReadFile(tc.file) c.Assert(err, IsNil) c.Check(assetData, DeepEquals, data, Commentf("asset %q has not been updated", tc.asset)) } diff -Nru snapd-2.62+23.10/bootloader/efi/efi.go snapd-2.63+23.10/bootloader/efi/efi.go --- snapd-2.62+23.10/bootloader/efi/efi.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/bootloader/efi/efi.go 2024-04-24 00:00:39.000000000 +0000 @@ -26,7 +26,6 @@ "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "unicode/utf16" @@ -113,7 +112,7 @@ return nil, 0, cannotReadError(name, err) } defer varf.Close() - b, err := ioutil.ReadAll(varf) + b, err := io.ReadAll(varf) if err != nil { return nil, 0, cannotReadError(name, err) } @@ -173,7 +172,7 @@ if !ok { attr = VariableRuntimeAccess | VariableBootServiceAccess } - return ioutil.NopCloser(bytes.NewBuffer(val)), attr, int64(len(val)), nil + return io.NopCloser(bytes.NewBuffer(val)), attr, int64(len(val)), nil } return nil, 0, 0, fmt.Errorf("EFI variable %s not mocked", name) } diff -Nru snapd-2.62+23.10/bootloader/grubenv/grubenv.go snapd-2.63+23.10/bootloader/grubenv/grubenv.go --- snapd-2.62+23.10/bootloader/grubenv/grubenv.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/bootloader/grubenv/grubenv.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "bytes" "fmt" - "io/ioutil" "os" "github.com/snapcore/snapd/strutil" @@ -56,7 +55,7 @@ } func (g *Env) Load() error { - buf, err := ioutil.ReadFile(g.path) + buf, err := os.ReadFile(g.path) if err != nil { return err } diff -Nru snapd-2.62+23.10/bootloader/lk.go snapd-2.63+23.10/bootloader/lk.go --- snapd-2.62+23.10/bootloader/lk.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/bootloader/lk.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "fmt" "io" - "io/ioutil" "os" "path/filepath" @@ -444,7 +443,7 @@ // this is live system, extracted bootimg needs to be flashed to // free bootimg partition and env has to be updated with // new kernel snap to bootimg partition mapping - tmpdir, err := ioutil.TempDir("", "bootimg") + tmpdir, err := os.MkdirTemp("", "bootimg") if err != nil { return fmt.Errorf("cannot create temp directory: %v", err) } diff -Nru snapd-2.62+23.10/bootloader/lk_test.go snapd-2.63+23.10/bootloader/lk_test.go --- snapd-2.62+23.10/bootloader/lk_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/bootloader/lk_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "fmt" - "io/ioutil" "os" "path/filepath" "sort" @@ -259,7 +258,7 @@ c.Assert(err, IsNil) // just boot.img and snapbootsel.bin are there, no kernel.img - infos, err := ioutil.ReadDir(filepath.Join(s.rootdir, "boot", "lk", "")) + infos, err := os.ReadDir(filepath.Join(s.rootdir, "boot", "lk", "")) c.Assert(err, IsNil) var fnames []string for _, info := range infos { @@ -367,13 +366,13 @@ // and validate it went to the "boot_a" partition bootA := filepath.Join(s.rootdir, "/dev/disk/by-partlabel/boot_a") - content, err := ioutil.ReadFile(bootA) + content, err := os.ReadFile(bootA) c.Assert(err, IsNil) c.Assert(string(content), Equals, "I'm the default boot image name") // also validate that bootB is empty bootB := filepath.Join(s.rootdir, "/dev/disk/by-partlabel/boot_b") - content, err = ioutil.ReadFile(bootB) + content, err = os.ReadFile(bootB) c.Assert(err, IsNil) c.Assert(content, HasLen, 0) @@ -451,7 +450,7 @@ bootAPartUUID, err := 
disk.FindMatchingPartitionUUIDWithPartLabel("boot_a") c.Assert(err, IsNil) bootA := filepath.Join(s.rootdir, "/dev/disk/by-partuuid", bootAPartUUID) - content, err := ioutil.ReadFile(bootA) + content, err := os.ReadFile(bootA) c.Assert(err, IsNil) c.Assert(string(content), Equals, "I'm the default boot image name") @@ -459,7 +458,7 @@ bootBPartUUID, err := disk.FindMatchingPartitionUUIDWithPartLabel("boot_b") c.Assert(err, IsNil) bootB := filepath.Join(s.rootdir, "/dev/disk/by-partuuid", bootBPartUUID) - content, err = ioutil.ReadFile(bootB) + content, err = os.ReadFile(bootB) c.Assert(err, IsNil) c.Assert(content, HasLen, 0) diff -Nru snapd-2.62+23.10/bootloader/piboot.go snapd-2.63+23.10/bootloader/piboot.go --- snapd-2.62+23.10/bootloader/piboot.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/bootloader/piboot.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "encoding/binary" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -238,7 +237,7 @@ // Writes os_prefix in RPi config.txt or tryboot.txt func (p *piboot) writeRPiCfgWithOsPrefix(prefix, inFile, outFile string) error { - buf, err := ioutil.ReadFile(inFile) + buf, err := os.ReadFile(inFile) if err != nil { return err } @@ -268,7 +267,7 @@ } func (p *piboot) writeCmdline(env *ubootenv.Env, defaultsFile, outFile string) error { - buf, err := ioutil.ReadFile(defaultsFile) + buf, err := os.ReadFile(defaultsFile) if err != nil { return err } @@ -390,7 +389,7 @@ // To find out the EEPROM version we do the same as the // rpi-eeprom-update script (see // https://github.com/raspberrypi/rpi-eeprom/blob/master/rpi-eeprom-update) - buf, err := ioutil.ReadFile(rpi4EepromTimeStampPath) + buf, err := os.ReadFile(rpi4EepromTimeStampPath) if err != nil { return false, err } @@ -408,7 +407,7 @@ func (p *piboot) isRaspberryPi4() bool { // For RPi4 detection we do the same as the rpi-eeprom-update script (see // https://github.com/raspberrypi/rpi-eeprom/blob/master/rpi-eeprom-update) - buf, err := ioutil.ReadFile(rpi4RevisionCodesPath) + buf, err := os.ReadFile(rpi4RevisionCodesPath) if err != nil { return false } diff -Nru snapd-2.62+23.10/bootloader/piboot_test.go snapd-2.63+23.10/bootloader/piboot_test.go --- snapd-2.62+23.10/bootloader/piboot_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/bootloader/piboot_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,6 @@ package bootloader_test import ( - "io/ioutil" "os" "path/filepath" "strings" @@ -155,7 +154,7 @@ snapf, err := snapfile.Open(fn) c.Assert(err, IsNil) - assetsDir, err := ioutil.TempDir("", "kernel-assets") + assetsDir, err := os.MkdirTemp("", "kernel-assets") c.Assert(err, IsNil) defer os.RemoveAll(assetsDir) @@ -313,7 +312,7 @@ }, } for _, fInfo := range files { - readData, err := ioutil.ReadFile(fInfo.path) + readData, err := os.ReadFile(fInfo.path) c.Assert(err, IsNil) c.Assert(string(readData), Equals, fInfo.data) } @@ -347,7 +346,7 @@ }, } for _, fInfo := range files { - readData, err := ioutil.ReadFile(fInfo.path) + readData, err := os.ReadFile(fInfo.path) c.Assert(err, IsNil) c.Assert(string(readData), Equals, fInfo.data) } @@ -376,7 +375,7 @@ }, } for _, fInfo := range files { - readData, err := ioutil.ReadFile(fInfo.path) + readData, err := os.ReadFile(fInfo.path) c.Assert(err, IsNil) c.Assert(string(readData), Equals, fInfo.data) } @@ -418,7 +417,7 @@ }, } for _, fInfo := range files { - readData, err := ioutil.ReadFile(fInfo.path) + readData, err := os.ReadFile(fInfo.path) c.Assert(err, IsNil) c.Assert(string(readData), 
Equals, fInfo.data) } @@ -449,7 +448,7 @@ }, } for _, fInfo := range files { - readData, err := ioutil.ReadFile(fInfo.path) + readData, err := os.ReadFile(fInfo.path) c.Assert(err, IsNil) c.Assert(string(readData), Equals, fInfo.data) } @@ -487,7 +486,7 @@ }, } for _, fInfo := range files { - readData, err := ioutil.ReadFile(fInfo.path) + readData, err := os.ReadFile(fInfo.path) c.Assert(err, IsNil) c.Assert(string(readData), Equals, fInfo.data) } diff -Nru snapd-2.62+23.10/bootloader/ubootenv/env.go snapd-2.63+23.10/bootloader/ubootenv/env.go --- snapd-2.62+23.10/bootloader/ubootenv/env.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/bootloader/ubootenv/env.go 2024-04-24 00:00:39.000000000 +0000 @@ -27,7 +27,6 @@ "fmt" "hash/crc32" "io" - "io/ioutil" "os" "path/filepath" "sort" @@ -110,7 +109,7 @@ } defer f.Close() - contentWithHeader, err := ioutil.ReadAll(f) + contentWithHeader, err := io.ReadAll(f) if err != nil { return nil, err } diff -Nru snapd-2.62+23.10/bootloader/ubootenv/env_test.go snapd-2.63+23.10/bootloader/ubootenv/env_test.go --- snapd-2.62+23.10/bootloader/ubootenv/env_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/bootloader/ubootenv/env_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "bytes" "hash/crc32" - "io/ioutil" + "io" "os" "path/filepath" "strings" @@ -302,7 +302,7 @@ r, err := os.Open(u.envFile) c.Assert(err, IsNil) defer r.Close() - content, err := ioutil.ReadAll(r) + content, err := io.ReadAll(r) c.Assert(err, IsNil) c.Assert(content, DeepEquals, []byte{ // crc @@ -330,7 +330,7 @@ r, err := os.Open(u.envFile) c.Assert(err, IsNil) defer r.Close() - content, err := ioutil.ReadAll(r) + content, err := io.ReadAll(r) c.Assert(err, IsNil) c.Assert(content, DeepEquals, []byte{ // crc @@ -360,7 +360,7 @@ r, err := os.Open(u.envFile) c.Assert(err, IsNil) defer r.Close() - content, err := ioutil.ReadAll(r) + content, err := io.ReadAll(r) c.Assert(err, IsNil) c.Assert(content, DeepEquals, []byte{ // crc @@ -399,7 +399,7 @@ r, err := os.Open(u.envFile) c.Assert(err, IsNil) defer r.Close() - content, err := ioutil.ReadAll(r) + content, err := io.ReadAll(r) c.Assert(err, IsNil) c.Assert(content, DeepEquals, []byte{ // crc diff -Nru snapd-2.62+23.10/bootloader/withbootassettesting.go snapd-2.63+23.10/bootloader/withbootassettesting.go --- snapd-2.62+23.10/bootloader/withbootassettesting.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/bootloader/withbootassettesting.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ import ( "bytes" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -59,7 +58,7 @@ panic(fmt.Sprintf("cannot readlink: %v", err)) } - injectPieceRaw, err := ioutil.ReadFile(filepath.Join(filepath.Dir(selfExe), "bootassetstesting")) + injectPieceRaw, err := os.ReadFile(filepath.Join(filepath.Dir(selfExe), "bootassetstesting")) if os.IsNotExist(err) { logger.Noticef("no boot asset testing marker") return diff -Nru snapd-2.62+23.10/build-aux/snap/snapcraft.yaml snapd-2.63+23.10/build-aux/snap/snapcraft.yaml --- snapd-2.62+23.10/build-aux/snap/snapcraft.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/build-aux/snap/snapcraft.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -87,6 +87,8 @@ git diff ) > $SNAPCRAFT_PART_INSTALL/usr/lib/snapd/dirty-git-tree-info.txt fi + # copy helper for collecting debug output + cp -av debug-tools/snap-debug-info.sh $SNAPCRAFT_PART_INSTALL/usr/lib/snapd/ # xdelta is used to enable delta downloads (even if the host does not have it) xdelta3: Binary files 
/tmp/tmpw5xkt6dt/fP1LgyGjUa/snapd-2.62+23.10/c-vendor/squashfuse/.git/index and /tmp/tmpw5xkt6dt/qHQIHy7qQp/snapd-2.63+23.10/c-vendor/squashfuse/.git/index differ diff -Nru snapd-2.62+23.10/c-vendor/squashfuse/.git/logs/HEAD snapd-2.63+23.10/c-vendor/squashfuse/.git/logs/HEAD --- snapd-2.62+23.10/c-vendor/squashfuse/.git/logs/HEAD 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/c-vendor/squashfuse/.git/logs/HEAD 2024-04-24 00:00:39.000000000 +0000 @@ -1,2 +1,2 @@ -0000000000000000000000000000000000000000 94f998c58d2bb6dff00173f33140a0354adce324 ernestl 1711055474 +0200 clone: from https://github.com/vasi/squashfuse -94f998c58d2bb6dff00173f33140a0354adce324 3f4dd2928ab362f8b20eab2be864d8e622472df5 ernestl 1711055474 +0200 checkout: moving from master to 3f4dd2928ab362f8b20eab2be864d8e622472df5 +0000000000000000000000000000000000000000 94f998c58d2bb6dff00173f33140a0354adce324 ernestl 1713986045 +0200 clone: from https://github.com/vasi/squashfuse +94f998c58d2bb6dff00173f33140a0354adce324 3f4dd2928ab362f8b20eab2be864d8e622472df5 ernestl 1713986045 +0200 checkout: moving from master to 3f4dd2928ab362f8b20eab2be864d8e622472df5 diff -Nru snapd-2.62+23.10/c-vendor/squashfuse/.git/logs/refs/heads/master snapd-2.63+23.10/c-vendor/squashfuse/.git/logs/refs/heads/master --- snapd-2.62+23.10/c-vendor/squashfuse/.git/logs/refs/heads/master 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/c-vendor/squashfuse/.git/logs/refs/heads/master 2024-04-24 00:00:39.000000000 +0000 @@ -1 +1 @@ -0000000000000000000000000000000000000000 94f998c58d2bb6dff00173f33140a0354adce324 ernestl 1711055474 +0200 clone: from https://github.com/vasi/squashfuse +0000000000000000000000000000000000000000 94f998c58d2bb6dff00173f33140a0354adce324 ernestl 1713986045 +0200 clone: from https://github.com/vasi/squashfuse diff -Nru snapd-2.62+23.10/c-vendor/squashfuse/.git/logs/refs/remotes/origin/HEAD snapd-2.63+23.10/c-vendor/squashfuse/.git/logs/refs/remotes/origin/HEAD --- snapd-2.62+23.10/c-vendor/squashfuse/.git/logs/refs/remotes/origin/HEAD 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/c-vendor/squashfuse/.git/logs/refs/remotes/origin/HEAD 2024-04-24 00:00:39.000000000 +0000 @@ -1 +1 @@ -0000000000000000000000000000000000000000 94f998c58d2bb6dff00173f33140a0354adce324 ernestl 1711055474 +0200 clone: from https://github.com/vasi/squashfuse +0000000000000000000000000000000000000000 94f998c58d2bb6dff00173f33140a0354adce324 ernestl 1713986045 +0200 clone: from https://github.com/vasi/squashfuse Binary files /tmp/tmpw5xkt6dt/fP1LgyGjUa/snapd-2.62+23.10/c-vendor/squashfuse/.git/objects/pack/pack-282773acae14395089027a5daa4658d0aba664e2.idx and /tmp/tmpw5xkt6dt/qHQIHy7qQp/snapd-2.63+23.10/c-vendor/squashfuse/.git/objects/pack/pack-282773acae14395089027a5daa4658d0aba664e2.idx differ Binary files /tmp/tmpw5xkt6dt/fP1LgyGjUa/snapd-2.62+23.10/c-vendor/squashfuse/.git/objects/pack/pack-282773acae14395089027a5daa4658d0aba664e2.pack and /tmp/tmpw5xkt6dt/qHQIHy7qQp/snapd-2.63+23.10/c-vendor/squashfuse/.git/objects/pack/pack-282773acae14395089027a5daa4658d0aba664e2.pack differ Binary files /tmp/tmpw5xkt6dt/fP1LgyGjUa/snapd-2.62+23.10/c-vendor/squashfuse/.git/objects/pack/pack-65330f303c36a4e35ce5078003665f1d492a8cf1.idx and /tmp/tmpw5xkt6dt/qHQIHy7qQp/snapd-2.63+23.10/c-vendor/squashfuse/.git/objects/pack/pack-65330f303c36a4e35ce5078003665f1d492a8cf1.idx differ Binary files /tmp/tmpw5xkt6dt/fP1LgyGjUa/snapd-2.62+23.10/c-vendor/squashfuse/.git/objects/pack/pack-65330f303c36a4e35ce5078003665f1d492a8cf1.pack and 
/tmp/tmpw5xkt6dt/qHQIHy7qQp/snapd-2.63+23.10/c-vendor/squashfuse/.git/objects/pack/pack-65330f303c36a4e35ce5078003665f1d492a8cf1.pack differ diff -Nru snapd-2.62+23.10/client/apps.go snapd-2.63+23.10/client/apps.go --- snapd-2.62+23.10/client/apps.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/apps.go 2024-04-24 00:00:39.000000000 +0000 @@ -75,6 +75,11 @@ // If Service is true, only return apps that are services // (app.IsService() is true); otherwise, return all. Service bool + // Global if set, returns only the global status of the services. This + // is only relevant for user services, where we either return the status + // of the services for the current user, or the global enable status. + // For root-users, global is always implied. + Global bool } // Apps returns information about all matching apps. Each name can be @@ -88,6 +93,9 @@ if opts.Service { q.Add("select", "service") } + if opts.Global { + q.Add("global", fmt.Sprintf("%t", opts.Global)) + } var appInfos []*AppInfo _, err := client.doSync("GET", "/v2/apps", q, nil, nil, &appInfos) diff -Nru snapd-2.62+23.10/client/apps_test.go snapd-2.63+23.10/client/apps_test.go --- snapd-2.62+23.10/client/apps_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/apps_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -65,7 +65,19 @@ return services, err } -var appcheckers = []func(*clientSuite, *check.C) ([]*client.AppInfo, error){testClientApps, testClientAppsService} +func testClientAppsGlobal(cs *clientSuite, c *check.C) ([]*client.AppInfo, error) { + services, err := cs.cli.Apps([]string{"foo", "bar"}, client.AppOptions{Global: true}) + c.Check(cs.req.URL.Path, check.Equals, "/v2/apps") + c.Check(cs.req.Method, check.Equals, "GET") + query := cs.req.URL.Query() + c.Check(query, check.HasLen, 2) + c.Check(query.Get("names"), check.Equals, "foo,bar") + c.Check(query.Get("global"), check.Equals, "true") + + return services, err +} + +var appcheckers = []func(*clientSuite, *check.C) ([]*client.AppInfo, error){testClientApps, testClientAppsService, testClientAppsGlobal} func (cs *clientSuite) TestClientServiceGetHappy(c *check.C) { expected := []*client.AppInfo{mksvc("foo", "foo"), mksvc("bar", "bar1")} diff -Nru snapd-2.62+23.10/client/aspects_test.go snapd-2.63+23.10/client/aspects_test.go --- snapd-2.62+23.10/client/aspects_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/aspects_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "encoding/json" - "io/ioutil" + "io" "net/url" . 
"gopkg.in/check.v1" @@ -50,7 +50,7 @@ c.Check(cs.reqs[0].Method, Equals, "PUT") c.Check(cs.reqs[0].Header.Get("Content-Type"), Equals, "application/json") c.Check(cs.reqs[0].URL.Path, Equals, "/v2/aspects/a/b/c") - data, err := ioutil.ReadAll(cs.reqs[0].Body) + data, err := io.ReadAll(cs.reqs[0].Body) c.Assert(err, IsNil) // need to decode because entries may have been encoded in any order diff -Nru snapd-2.62+23.10/client/asserts_test.go snapd-2.63+23.10/client/asserts_test.go --- snapd-2.62+23.10/client/asserts_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/asserts_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "errors" - "io/ioutil" + "io" "net/http" "net/url" @@ -41,7 +41,7 @@ a := []byte("Assertion.") err := cs.cli.Ack(a) c.Assert(err, IsNil) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, IsNil) c.Check(body, DeepEquals, a) c.Check(cs.req.Method, Equals, "POST") diff -Nru snapd-2.62+23.10/client/change_test.go snapd-2.63+23.10/client/change_test.go --- snapd-2.62+23.10/client/change_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/change_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,7 @@ package client_test import ( - "io/ioutil" + "io" "time" "gopkg.in/check.v1" @@ -227,7 +227,7 @@ ReadyTime: time.Date(2016, 04, 21, 1, 2, 4, 0, time.UTC), }) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) c.Assert(string(body), check.Equals, "{\"action\":\"abort\"}\n") diff -Nru snapd-2.62+23.10/client/client.go snapd-2.63+23.10/client/client.go --- snapd-2.62+23.10/client/client.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/client.go 2024-04-24 00:00:39.000000000 +0000 @@ -26,7 +26,6 @@ "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -438,7 +437,7 @@ dec := json.NewDecoder(reader) if err := dec.Decode(v); err != nil { r := dec.Buffered() - buf, err1 := ioutil.ReadAll(r) + buf, err1 := io.ReadAll(r) if err1 != nil { buf = []byte(fmt.Sprintf("error reading buffered response body: %s", err1)) } diff -Nru snapd-2.62+23.10/client/client_test.go snapd-2.63+23.10/client/client_test.go --- snapd-2.62+23.10/client/client_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/client_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/http/httptest" @@ -136,7 +135,7 @@ func (cs *clientSuite) TestClientWorks(c *C) { var v []int cs.rsp = `[1,2]` - reqBody := ioutil.NopCloser(strings.NewReader("")) + reqBody := io.NopCloser(strings.NewReader("")) statusCode, err := cs.cli.Do("GET", "/this", nil, reqBody, &v, nil) c.Check(err, IsNil) c.Check(statusCode, Equals, 200) @@ -188,7 +187,7 @@ var v []int cs.rsp = `[1,2]` cs.err = fmt.Errorf("borken") - reqBody := ioutil.NopCloser(strings.NewReader("")) + reqBody := io.NopCloser(strings.NewReader("")) doOpts := &client.DoOptions{ // Timeout is unset, thus 0, and thus we ignore the retry and only run // once even though there is an error @@ -202,7 +201,7 @@ func (cs *clientSuite) TestClientDoRetryValidation(c *C) { var v []int cs.rsp = `[1,2]` - reqBody := ioutil.NopCloser(strings.NewReader("")) + reqBody := io.NopCloser(strings.NewReader("")) doOpts := &client.DoOptions{ Retry: time.Duration(-1), Timeout: time.Duration(time.Minute), @@ -213,7 +212,7 @@ } func (cs *clientSuite) TestClientDoRetryWorks(c *C) { - reqBody := ioutil.NopCloser(strings.NewReader("")) + 
reqBody := io.NopCloser(strings.NewReader("")) cs.err = fmt.Errorf("borken") doOpts := &client.DoOptions{ Retry: time.Duration(time.Millisecond), @@ -228,7 +227,7 @@ } func (cs *clientSuite) TestClientOnlyRetryAppropriateErrors(c *C) { - reqBody := ioutil.NopCloser(strings.NewReader("")) + reqBody := io.NopCloser(strings.NewReader("")) doOpts := &client.DoOptions{ Retry: time.Millisecond, Timeout: 1 * time.Minute, @@ -251,7 +250,7 @@ var v []int cs.status = 202 cs.rsp = `[1,2]` - reqBody := ioutil.NopCloser(strings.NewReader("")) + reqBody := io.NopCloser(strings.NewReader("")) statusCode, err := cs.cli.Do("GET", "/this", nil, reqBody, &v, nil) c.Check(err, IsNil) c.Check(statusCode, Equals, 202) @@ -559,7 +558,7 @@ resp = &http.Response{ Status: "400 Bad Request", Header: h, - Body: ioutil.NopCloser(strings.NewReader(`{ + Body: io.NopCloser(strings.NewReader(`{ "status-code": 400, "type": "error", "result": { @@ -573,7 +572,7 @@ resp = &http.Response{ Status: "400 Bad Request", Header: h, - Body: ioutil.NopCloser(strings.NewReader("{}")), + Body: io.NopCloser(strings.NewReader("{}")), } err = client.ParseErrorInTest(resp) c.Check(err, ErrorMatches, `server error: "400 Bad Request"`) @@ -614,7 +613,7 @@ c.Check(cs.reqs, HasLen, 1) c.Check(cs.reqs[0].Method, Equals, "POST") c.Check(cs.reqs[0].URL.Path, Equals, "/v2/debug") - data, err := ioutil.ReadAll(cs.reqs[0].Body) + data, err := io.ReadAll(cs.reqs[0].Body) c.Assert(err, IsNil) c.Check(data, DeepEquals, []byte(`{"action":"ensure-state-soon"}`)) } @@ -629,7 +628,7 @@ c.Check(cs.reqs, HasLen, 1) c.Check(cs.reqs[0].Method, Equals, "POST") c.Check(cs.reqs[0].URL.Path, Equals, "/v2/debug") - data, err := ioutil.ReadAll(cs.reqs[0].Body) + data, err := io.ReadAll(cs.reqs[0].Body) c.Assert(err, IsNil) c.Check(string(data), DeepEquals, `{"action":"do-something","params":["param1","param2"]}`) } @@ -659,7 +658,7 @@ c.Check(cs.reqs, HasLen, 1) c.Check(cs.reqs[0].Method, Equals, "POST") c.Check(cs.reqs[0].URL.Path, Equals, "/v2/debug") - data, err := ioutil.ReadAll(cs.reqs[0].Body) + data, err := io.ReadAll(cs.reqs[0].Body) c.Assert(err, IsNil) c.Check(string(data), Equals, `{"action":"migrate-home","snaps":["foo","bar"]}`) } diff -Nru snapd-2.62+23.10/client/clientutil/service_scope.go snapd-2.63+23.10/client/clientutil/service_scope.go --- snapd-2.62+23.10/client/clientutil/service_scope.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/clientutil/service_scope.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,6 +23,8 @@ "fmt" "github.com/snapcore/snapd/client" + "github.com/snapcore/snapd/i18n" + "github.com/snapcore/snapd/snap" "github.com/snapcore/snapd/strutil" ) @@ -74,3 +76,24 @@ Names: strutil.CommaSeparatedList(us.Usernames), } } + +// FmtServiceStatus formats a given service application into the following string +// +// To keep output persistent between snapctl and snap cmd. +func FmtServiceStatus(svc *client.AppInfo, isGlobal bool) string { + startup := i18n.G("disabled") + if svc.Enabled { + startup = i18n.G("enabled") + } + + // When requesting global service status, we don't have any active + // information available for user daemons. 
+ current := i18n.G("inactive") + if svc.DaemonScope == snap.UserDaemon && isGlobal { + current = "-" + } else if svc.Active { + current = i18n.G("active") + } + + return fmt.Sprintf("%s.%s\t%s\t%s\t%s", svc.Snap, svc.Name, startup, current, ClientAppInfoNotes(svc)) +} diff -Nru snapd-2.62+23.10/client/cohort_test.go snapd-2.63+23.10/client/cohort_test.go --- snapd-2.62+23.10/client/cohort_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/cohort_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "encoding/json" "errors" - "io/ioutil" + "io" "golang.org/x/xerrors" "gopkg.in/check.v1" @@ -33,7 +33,7 @@ c.Check(cs.req.Method, check.Equals, "POST") c.Check(cs.req.URL.Path, check.Equals, "/v2/cohorts") - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) var jsonBody map[string]interface{} err = json.Unmarshal(body, &jsonBody) @@ -57,7 +57,7 @@ "bar": "what-what", }) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) var jsonBody map[string]interface{} err = json.Unmarshal(body, &jsonBody) diff -Nru snapd-2.62+23.10/client/icons.go snapd-2.63+23.10/client/icons.go --- snapd-2.62+23.10/client/icons.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/icons.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "context" "fmt" - "io/ioutil" + "io" "regexp" "golang.org/x/xerrors" @@ -58,7 +58,7 @@ return nil, fmt.Errorf("%s: cannot determine filename", errPrefix) } - content, err := ioutil.ReadAll(response.Body) + content, err := io.ReadAll(response.Body) if err != nil { return nil, fmt.Errorf("%s: %s", errPrefix, err) } diff -Nru snapd-2.62+23.10/client/model_test.go snapd-2.63+23.10/client/model_test.go --- snapd-2.62+23.10/client/model_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/model_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "encoding/json" "errors" "io" - "io/ioutil" "net/http" "os" "path/filepath" @@ -127,7 +126,7 @@ c.Check(id, Equals, "d728") c.Assert(cs.req.Header.Get("Content-Type"), Equals, "application/json") - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, IsNil) jsonBody := make(map[string]interface{}) err = json.Unmarshal(body, &jsonBody) @@ -235,7 +234,7 @@ c.Assert(len(matches), Equals, 2) boundary := "--" + matches[1] - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, IsNil) expected := boundary + ` Content-Disposition: form-data; name="new-model" diff -Nru snapd-2.62+23.10/client/notices.go snapd-2.63+23.10/client/notices.go --- snapd-2.62+23.10/client/notices.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/client/notices.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,64 @@ +// Copyright (c) 2024 Canonical Ltd +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License version 3 as +// published by the Free Software Foundation. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
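A hypothetical caller of the FmtServiceStatus helper introduced above (the AppInfo values are invented for illustration; only the global/user-daemon behaviour follows the code shown):

package main

import (
	"fmt"

	"github.com/snapcore/snapd/client"
	"github.com/snapcore/snapd/client/clientutil"
	"github.com/snapcore/snapd/snap"
)

func main() {
	svc := &client.AppInfo{
		Snap:        "test-snap",
		Name:        "svc1",
		Daemon:      "simple",
		DaemonScope: snap.UserDaemon,
		Enabled:     true,
	}
	// with isGlobal=true the "current" column is printed as "-" for user
	// daemons, since no per-user activity information is available
	fmt.Println(clientutil.FmtServiceStatus(svc, true))
	// prints, tab-separated: test-snap.svc1  enabled  -  <notes column>
}

Sharing this formatter keeps the output consistent between the snap and snapctl commands, which is the stated purpose in the helper's doc comment.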
+ +package client + +import ( + "bytes" + "encoding/json" +) + +type NotifyOptions struct { + // Type is the notice's type. Currently only notices of type CustomNotice + // can be added. + Type NoticeType + + // Key is the notice's key. For "custom" notices, this must be in + // "domain.com/key" format. + Key string +} + +// Notify records an occurrence of a notice with the specified options, +// returning the notice ID. +func (client *Client) Notify(opts *NotifyOptions) (string, error) { + var payload = struct { + Action string `json:"action"` + Type string `json:"type"` + Key string `json:"key"` + }{ + Action: "add", + Type: string(opts.Type), + Key: opts.Key, + } + var body bytes.Buffer + if err := json.NewEncoder(&body).Encode(&payload); err != nil { + return "", err + } + + result := struct { + ID string `json:"id"` + }{} + _, err := client.doSync("POST", "/v2/notices", nil, nil, &body, &result) + if err != nil { + return "", err + } + return result.ID, err +} + +type NoticeType string + +const ( + // SnapRunInhibitNotice is recorded when "snap run" is inhibited due refresh. + SnapRunInhibitNotice NoticeType = "snap-run-inhibit" +) diff -Nru snapd-2.62+23.10/client/notices_test.go snapd-2.63+23.10/client/notices_test.go --- snapd-2.62+23.10/client/notices_test.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/client/notices_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,50 @@ +// -*- Mode: Go; indent-tabs-mode: t -*- + +/* + * Copyright (C) 2024 Canonical Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package client_test + +import ( + "encoding/json" + "io" + + "github.com/snapcore/snapd/client" + . 
"gopkg.in/check.v1" +) + +func (cs *clientSuite) TestNotify(c *C) { + cs.rsp = `{"type": "sync", "result": {"id": "7"}}` + noticeID, err := cs.cli.Notify(&client.NotifyOptions{ + Type: client.SnapRunInhibitNotice, + Key: "snap-name", + }) + c.Assert(err, IsNil) + c.Check(noticeID, Equals, "7") + c.Assert(cs.req.URL.Path, Equals, "/v2/notices") + + body, err := io.ReadAll(cs.req.Body) + c.Assert(err, IsNil) + var m map[string]any + err = json.Unmarshal(body, &m) + c.Assert(err, IsNil) + c.Assert(m, DeepEquals, map[string]any{ + "action": "add", + "type": "snap-run-inhibit", + "key": "snap-name", + }) +} diff -Nru snapd-2.62+23.10/client/quota_test.go snapd-2.63+23.10/client/quota_test.go --- snapd-2.62+23.10/client/quota_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/quota_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "bytes" "encoding/json" - "io/ioutil" + "io" "time" "gopkg.in/check.v1" @@ -79,7 +79,7 @@ c.Assert(chgID, check.Equals, "42") c.Check(cs.req.Method, check.Equals, "POST") c.Check(cs.req.URL.Path, check.Equals, "/v2/quotas") - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) var req map[string]interface{} err = jsonutil.DecodeWithNumber(bytes.NewReader(body), &req) @@ -175,7 +175,7 @@ c.Assert(chgID, check.Equals, "42") c.Check(cs.req.Method, check.Equals, "POST") c.Check(cs.req.URL.Path, check.Equals, "/v2/quotas") - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) var req map[string]interface{} err = json.Unmarshal(body, &req) diff -Nru snapd-2.62+23.10/client/snap_op_test.go snapd-2.63+23.10/client/snap_op_test.go --- snapd-2.62+23.10/client/snap_op_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/snap_op_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "errors" "fmt" "io" - "io/ioutil" "mime" "mime/multipart" "net/http" @@ -150,7 +149,7 @@ _, ok := cs.req.Context().Deadline() c.Check(ok, check.Equals, true) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil, check.Commentf(s.action)) jsonBody := make(map[string]string) err = json.Unmarshal(body, &jsonBody) @@ -178,7 +177,7 @@ c.Assert(cs.req.Header.Get("Content-Type"), check.Equals, "application/json", check.Commentf(s.action)) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil, check.Commentf(s.action)) jsonBody := make(map[string]interface{}) err = json.Unmarshal(body, &jsonBody) @@ -207,7 +206,7 @@ c.Assert(cs.req.Header.Get("Content-Type"), check.Equals, "application/json", check.Commentf(s.action)) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil, check.Commentf(s.action)) jsonBody := make(map[string]interface{}) err = json.Unmarshal(body, &jsonBody) @@ -238,7 +237,7 @@ c.Assert(cs.req.Header.Get("Content-Type"), check.Equals, "application/json", check.Commentf(s.action)) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil, check.Commentf(s.action)) jsonBody := make(map[string]interface{}) err = json.Unmarshal(body, &jsonBody) @@ -267,7 +266,7 @@ c.Assert(err, check.IsNil) c.Check(cs.req.Header.Get("Content-Type"), check.Equals, "application/json") - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) jsonBody := make(map[string]interface{}) 
err = json.Unmarshal(body, &jsonBody) @@ -296,7 +295,7 @@ id, err := cs.cli.InstallPath(snap, "", nil) c.Assert(err, check.IsNil) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) c.Assert(string(body), testutil.Contains, "\r\nsnap-data\r\n") @@ -326,7 +325,7 @@ id, err := cs.cli.InstallPath(snap, "", &client.SnapOptions{IgnoreRunning: true}) c.Assert(err, check.IsNil) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) c.Assert(string(body), check.Matches, "(?s).*\r\nsnap-data\r\n.*") @@ -357,7 +356,7 @@ id, err := cs.cli.InstallPath(snap, "foo_bar", nil) c.Assert(err, check.IsNil) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) c.Assert(string(body), check.Matches, "(?s).*\r\nsnap-data\r\n.*") @@ -389,7 +388,7 @@ id, err := cs.cli.InstallPathMany(paths, nil) c.Assert(err, check.IsNil) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) for _, name := range names { @@ -426,7 +425,7 @@ id, err := cs.cli.InstallPathMany(paths, &client.SnapOptions{Transaction: client.TransactionAllSnaps}) c.Assert(err, check.IsNil) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) for _, name := range names { @@ -469,7 +468,7 @@ c.Assert(err, check.IsNil) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) c.Check(string(body), check.Matches, `(?s).*Content-Disposition: form-data; name="dangerous"\r\n\r\ntrue\r\n.*`) @@ -500,7 +499,7 @@ c.Assert(err, check.IsNil) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) c.Check(string(body), check.Matches, `(?s).*Content-Disposition: form-data; name="dangerous"\r\n\r\ntrue\r\n.*`) @@ -528,7 +527,7 @@ _, err = cs.cli.InstallPath(snap, "", &opts) c.Assert(err, check.IsNil) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) c.Assert(string(body), check.Matches, "(?s).*Content-Disposition: form-data; name=\"dangerous\"\r\n\r\ntrue\r\n.*") @@ -562,7 +561,7 @@ _, err = cs.cli.Install("foo", &opts) c.Assert(err, check.IsNil) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) jsonBody := make(map[string]interface{}) err = json.Unmarshal(body, &jsonBody) @@ -572,7 +571,7 @@ _, err = cs.cli.InstallPath(snap, "", &opts) c.Assert(err, check.IsNil) - body, err = ioutil.ReadAll(cs.req.Body) + body, err = io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) c.Assert(string(body), check.Matches, "(?s).*Content-Disposition: form-data; name=\"unaliased\"\r\n\r\ntrue\r\n.*") @@ -598,7 +597,7 @@ _, err = cs.cli.InstallMany([]string{"foo", "bar"}, &opts) c.Assert(err, check.IsNil) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) jsonBody := make(map[string]interface{}) err = json.Unmarshal(body, &jsonBody) @@ -609,7 +608,7 @@ _, err = cs.cli.InstallPath(snap, "", &opts) c.Assert(err, check.IsNil) - body, err = ioutil.ReadAll(cs.req.Body) + body, err = io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) c.Assert(string(body), check.Matches, @@ -636,7 +635,7 @@ _, err = cs.cli.Install("foo", &opts) c.Assert(err, check.IsNil) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) 
c.Assert(err, check.IsNil) var jsonBody map[string]interface{} err = json.Unmarshal(body, &jsonBody) @@ -646,7 +645,7 @@ _, err = cs.cli.InstallPath(snap, "", &opts) c.Assert(err, check.IsNil) - body, err = ioutil.ReadAll(cs.req.Body) + body, err = io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) c.Assert(string(body), check.Matches, "(?s).*Content-Disposition: form-data; name=\"prefer\"\r\n\r\ntrue\r\n.*") @@ -660,7 +659,7 @@ break } c.Assert(err, check.IsNil) - slurp, err := ioutil.ReadAll(p) + slurp, err := io.ReadAll(p) c.Assert(err, check.IsNil) formData[p.FormName()] = string(slurp) } @@ -779,7 +778,7 @@ // check we posted the right stuff c.Assert(cs.req.Header.Get("Content-Type"), check.Equals, "application/json") c.Assert(cs.req.Header.Get("range"), check.Equals, "") - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) var jsonBody client.DownloadAction err = json.Unmarshal(body, &jsonBody) @@ -790,7 +789,7 @@ c.Check(jsonBody.HeaderPeek, check.Equals, true) // ensure we can read the response - content, err := ioutil.ReadAll(rc) + content, err := io.ReadAll(rc) c.Assert(err, check.IsNil) c.Check(string(content), check.Equals, cs.rsp) // and we can close it @@ -827,7 +826,7 @@ // check we posted the right stuff c.Assert(cs.req.Header.Get("Content-Type"), check.Equals, "application/json") c.Assert(cs.req.Header.Get("range"), check.Equals, "bytes: 64-") - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) var jsonBody client.DownloadAction err = json.Unmarshal(body, &jsonBody) @@ -839,7 +838,7 @@ c.Check(jsonBody.ResumeToken, check.Equals, "some-token") // ensure we can read the response - content, err := ioutil.ReadAll(rc) + content, err := io.ReadAll(rc) c.Assert(err, check.IsNil) c.Check(string(content), check.Equals, cs.rsp) // and we can close it @@ -865,7 +864,7 @@ ValidationSets []string `json:"validation-sets"` Action string `json:"action"` } - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) var decodedBody req @@ -900,7 +899,7 @@ Time string `json:"time"` HoldLevel string `json:"hold-level"` } - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) var decodedBody req diff -Nru snapd-2.62+23.10/client/snapctl.go snapd-2.63+23.10/client/snapctl.go --- snapd-2.62+23.10/client/snapctl.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/snapctl.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "encoding/json" "fmt" "io" - "io/ioutil" ) // InternalSnapctlCmdNeedsStdin returns true if the given snapctl command @@ -72,7 +71,7 @@ var stdinData []byte if stdin != nil { limitedStdin := &io.LimitedReader{R: stdin, N: stdinReadLimit + 1} - stdinData, err = ioutil.ReadAll(limitedStdin) + stdinData, err = io.ReadAll(limitedStdin) if err != nil { return nil, nil, fmt.Errorf("cannot read stdin: %v", err) } diff -Nru snapd-2.62+23.10/client/snapshot_test.go snapd-2.63+23.10/client/snapshot_test.go --- snapd-2.62+23.10/client/snapshot_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/snapshot_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "crypto/sha256" - "io/ioutil" + "io" "net/http" "net/url" "strconv" @@ -185,7 +185,7 @@ } if t.status == 200 { - buf, err := ioutil.ReadAll(r) + buf, err := io.ReadAll(r) c.Assert(err, check.IsNil) c.Assert(string(buf), check.Equals, t.content) } @@ -223,7 +223,7 @@ 
c.Assert(cs.req.Header.Get("Content-Length"), check.Equals, strconv.Itoa(len(fakeSnapshotData))) c.Check(importSet.ID, check.Equals, t.setID, comm) c.Check(importSet.Snaps, check.DeepEquals, []string{"baz", "bar", "foo"}, comm) - d, err := ioutil.ReadAll(cs.req.Body) + d, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) c.Check(string(d), check.Equals, fakeSnapshotData) } diff -Nru snapd-2.62+23.10/client/systems_test.go snapd-2.63+23.10/client/systems_test.go --- snapd-2.62+23.10/client/systems_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/systems_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "encoding/json" - "io/ioutil" + "io" "gopkg.in/check.v1" @@ -140,7 +140,7 @@ c.Check(cs.req.Method, check.Equals, "POST") c.Check(cs.req.URL.Path, check.Equals, "/v2/systems/1234") - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) var req map[string]interface{} err = json.Unmarshal(body, &req) @@ -182,7 +182,7 @@ c.Check(cs.req.Method, check.Equals, "POST") c.Check(cs.req.URL.Path, check.Equals, "/v2/systems/20201212") - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) var req map[string]interface{} err = json.Unmarshal(body, &req) @@ -377,7 +377,7 @@ c.Check(cs.req.Method, check.Equals, "POST") c.Check(cs.req.URL.Path, check.Equals, "/v2/systems/1234") - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) var req map[string]interface{} err = json.Unmarshal(body, &req) diff -Nru snapd-2.62+23.10/client/users_test.go snapd-2.63+23.10/client/users_test.go --- snapd-2.62+23.10/client/users_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/users_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,7 @@ package client_test import ( - "io/ioutil" + "io" . 
"gopkg.in/check.v1" @@ -46,7 +46,7 @@ {ID: 11, Username: "one-user", Email: "user@test.com"}, }) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, IsNil) c.Assert(string(body), Equals, `{"action":"remove","username":"one-user"}`) } @@ -69,7 +69,7 @@ c.Assert(err, ErrorMatches, "no can do") c.Assert(removed, IsNil) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, IsNil) c.Assert(string(body), Equals, `{"action":"remove","username":"one-user"}`) } @@ -92,7 +92,7 @@ c.Assert(cs.req.URL.Path, Equals, "/v2/users") c.Assert(err, IsNil) - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, IsNil) c.Assert(string(body), Equals, `{"action":"create","email":"one@email.com","sudoer":true,"known":true}`) @@ -170,7 +170,7 @@ for _, req := range cs.reqs { c.Assert(req.Method, Equals, "POST") c.Assert(req.URL.Path, Equals, "/v2/users") - data, err := ioutil.ReadAll(req.Body) + data, err := io.ReadAll(req.Body) c.Assert(err, IsNil) bodies = append(bodies, string(data)) } diff -Nru snapd-2.62+23.10/client/validate_test.go snapd-2.63+23.10/client/validate_test.go --- snapd-2.62+23.10/client/validate_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/client/validate_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "encoding/json" - "io/ioutil" + "io" "net/url" "gopkg.in/check.v1" @@ -96,7 +96,7 @@ }) c.Check(cs.req.Method, check.Equals, "POST") c.Check(cs.req.URL.Path, check.Equals, "/v2/validation-sets/foo/bar") - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) var req map[string]interface{} err = json.Unmarshal(body, &req) @@ -126,7 +126,7 @@ }) c.Check(cs.req.Method, check.Equals, "POST") c.Check(cs.req.URL.Path, check.Equals, "/v2/validation-sets/foo/bar") - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) var req map[string]interface{} err = json.Unmarshal(body, &req) @@ -164,7 +164,7 @@ c.Assert(cs.cli.ForgetValidationSet("foo", "bar", 3), check.IsNil) c.Check(cs.req.Method, check.Equals, "POST") c.Check(cs.req.URL.Path, check.Equals, "/v2/validation-sets/foo/bar") - body, err := ioutil.ReadAll(cs.req.Body) + body, err := io.ReadAll(cs.req.Body) c.Assert(err, check.IsNil) var req map[string]interface{} err = json.Unmarshal(body, &req) diff -Nru snapd-2.62+23.10/cmd/Makefile.am snapd-2.63+23.10/cmd/Makefile.am --- snapd-2.62+23.10/cmd/Makefile.am 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/Makefile.am 2024-04-24 00:00:39.000000000 +0000 @@ -108,12 +108,11 @@ # for the hack target also: snap-update-ns/snap-update-ns: snap-update-ns/*.go snap-update-ns/*.[ch] - cd snap-update-ns && GOPATH=$(or $(GOPATH),$(realpath $(srcdir)/../../../../..)) go build \ - -ldflags='-extldflags=-static -linkmode=external' -v + cd snap-update-ns && go build -ldflags='-extldflags=-static -linkmode=external' -v snap-seccomp/snap-seccomp: snap-seccomp/*.go - cd snap-seccomp && GOPATH=$(or $(GOPATH),$(realpath $(srcdir)/../../../../..)) go build -v + cd snap-seccomp && go build -v snapd-apparmor/snapd-apparmor: snapd-apparmor/*.go - cd snapd-apparmor && GOPATH=$(or $(GOPATH),$(realpath $(srcdir)/../../../../..)) go build -v + cd snapd-apparmor && go build -v ## ## libsnap-confine-private.a @@ -357,6 +356,7 @@ snap-confine/cookie-support-test.c \ snap-confine/mount-support-test.c \ snap-confine/ns-support-test.c \ + 
snap-confine/seccomp-support-test.c \ snap-confine/snap-confine-args-test.c \ snap-confine/snap-confine-invocation-test.c snap_confine_unit_tests_CFLAGS = $(snap_confine_snap_confine_CFLAGS) $(GLIB_CFLAGS) diff -Nru snapd-2.62+23.10/cmd/libsnap-confine-private/apparmor-support.c snapd-2.63+23.10/cmd/libsnap-confine-private/apparmor-support.c --- snapd-2.62+23.10/cmd/libsnap-confine-private/apparmor-support.c 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/libsnap-confine-private/apparmor-support.c 2024-04-24 00:00:39.000000000 +0000 @@ -37,6 +37,7 @@ #define SC_AA_ENFORCE_STR "enforce" #define SC_AA_COMPLAIN_STR "complain" #define SC_AA_MIXED_STR "mixed" +#define SC_AA_KILL_STR "kill" #define SC_AA_UNCONFINED_STR "unconfined" void sc_init_apparmor_support(struct sc_apparmor *apparmor) @@ -92,8 +93,9 @@ // expect to be confined by a profile with the name of a valid // snap-confine binary since if not we may be executed under a // profile with more permissions than expected - if (label != NULL && sc_streq(mode, SC_AA_ENFORCE_STR) - && sc_is_expected_path(label)) { + bool confined_mode = sc_streq(mode, SC_AA_ENFORCE_STR) + || sc_streq(mode, SC_AA_KILL_STR); + if (label != NULL && confined_mode && sc_is_expected_path(label)) { apparmor->is_confined = true; } else { apparmor->is_confined = false; @@ -106,6 +108,8 @@ apparmor->mode = SC_AA_ENFORCE; } else if (mode != NULL && strcmp(mode, SC_AA_MIXED_STR) == 0) { apparmor->mode = SC_AA_MIXED; + } else if (mode != NULL && strcmp(mode, SC_AA_KILL_STR) == 0) { + apparmor->mode = SC_AA_KILL; } else { apparmor->mode = SC_AA_INVALID; } diff -Nru snapd-2.62+23.10/cmd/libsnap-confine-private/apparmor-support.h snapd-2.63+23.10/cmd/libsnap-confine-private/apparmor-support.h --- snapd-2.62+23.10/cmd/libsnap-confine-private/apparmor-support.h 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/libsnap-confine-private/apparmor-support.h 2024-04-24 00:00:39.000000000 +0000 @@ -34,6 +34,8 @@ SC_AA_COMPLAIN, // The enforcement mode is "mixed" SC_AA_MIXED, + // The enforcement mode is "kill" + SC_AA_KILL, }; /** diff -Nru snapd-2.62+23.10/cmd/libsnap-confine-private/cgroup-support-test.c snapd-2.63+23.10/cmd/libsnap-confine-private/cgroup-support-test.c --- snapd-2.62+23.10/cmd/libsnap-confine-private/cgroup-support-test.c 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/libsnap-confine-private/cgroup-support-test.c 2024-04-24 00:00:39.000000000 +0000 @@ -250,7 +250,10 @@ static void cgroupv2_own_group_tear_down(cgroupv2_own_group_fixture *fixture, gconstpointer user_data) { sc_set_self_cgroup_path("/proc/self/cgroup"); - g_remove(fixture->self_cgroup); + if (g_remove(fixture->self_cgroup) < 0) { + /* test may have removed the file */ + g_assert_cmpint(errno, ==, ENOENT); + } g_free(fixture->self_cgroup); } diff -Nru snapd-2.62+23.10/cmd/libsnap-confine-private/classic-test.c snapd-2.63+23.10/cmd/libsnap-confine-private/classic-test.c --- snapd-2.62+23.10/cmd/libsnap-confine-private/classic-test.c 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/libsnap-confine-private/classic-test.c 2024-04-24 00:00:39.000000000 +0000 @@ -59,7 +59,8 @@ const char *old = meta_snap_yaml; if (mocked != NULL) { meta_snap_yaml = "snap-yaml.test"; - g_file_set_contents(meta_snap_yaml, mocked, -1, NULL); + g_assert_true(g_file_set_contents + (meta_snap_yaml, mocked, -1, NULL)); } else { meta_snap_yaml = "snap-yaml.missing"; } diff -Nru snapd-2.62+23.10/cmd/libsnap-confine-private/snap-test.c 
snapd-2.63+23.10/cmd/libsnap-confine-private/snap-test.c --- snapd-2.62+23.10/cmd/libsnap-confine-private/snap-test.c 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/libsnap-confine-private/snap-test.c 2024-04-24 00:00:39.000000000 +0000 @@ -350,6 +350,16 @@ "snap instance name can contain only one underscore"); sc_error_free(err); + // too long, 52 + sc_instance_name_validate + ("0123456789012345678901234567890123456789012345678901", &err); + g_assert_nonnull(err); + g_assert_true(sc_error_match + (err, SC_SNAP_DOMAIN, SC_SNAP_INVALID_INSTANCE_NAME)); + g_assert_cmpstr(sc_error_msg(err), ==, + "snap instance name can be at most 51 characters long"); + sc_error_free(err); + const char *valid_names[] = { "aa", "aaa", "aaaa", "aa_a", "aa_1", "aa_123", "aa_0123456789", diff -Nru snapd-2.62+23.10/cmd/libsnap-confine-private/snap.c snapd-2.63+23.10/cmd/libsnap-confine-private/snap.c --- snapd-2.62+23.10/cmd/libsnap-confine-private/snap.c 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/libsnap-confine-private/snap.c 2024-04-24 00:00:39.000000000 +0000 @@ -115,6 +115,14 @@ "snap instance name cannot be NULL"); goto out; } + + if (strlen(instance_name) > SNAP_INSTANCE_LEN) { + err = + sc_error_init(SC_SNAP_DOMAIN, SC_SNAP_INVALID_INSTANCE_NAME, + "snap instance name can be at most %d characters long", + SNAP_INSTANCE_LEN); + goto out; + } // instance name length + 1 extra overflow + 1 NULL char s[SNAP_INSTANCE_LEN + 1 + 1] = { 0 }; strncpy(s, instance_name, sizeof(s) - 1); diff -Nru snapd-2.62+23.10/cmd/libsnap-confine-private/utils-test.c snapd-2.63+23.10/cmd/libsnap-confine-private/utils-test.c --- snapd-2.62+23.10/cmd/libsnap-confine-private/utils-test.c 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/libsnap-confine-private/utils-test.c 2024-04-24 00:00:39.000000000 +0000 @@ -146,6 +146,13 @@ } } +static void my_unlink(const char *path) +{ + if (unlink(path) != 0 && errno != ENOENT) { + die("cannot unlink: %s", path); + } +} + /** * Perform the rest of testing in a ephemeral directory. 
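The new length check added to sc_instance_name_validate() in snap.c above rejects instance names longer than 51 characters (enough for a 40-character snap name, an underscore and a 10-character instance key), and the test exercises the 52-character boundary. A minimal Go sketch of the same rule, for illustration only; it is not the snapd implementation:

    package main

    import (
        "fmt"
        "strings"
    )

    // Illustrative sketch of the instance-name length rule added above:
    // "name" or "name_instancekey" may be at most 51 characters.
    const maxInstanceNameLen = 51

    func validateInstanceNameLength(instanceName string) error {
        if len(instanceName) > maxInstanceNameLen {
            return fmt.Errorf("snap instance name can be at most %d characters long", maxInstanceNameLen)
        }
        return nil
    }

    func main() {
        // 52 characters, the same boundary case the new C test exercises.
        fmt.Println(validateInstanceNameLength(strings.Repeat("0123456789", 5) + "01"))
    }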
* @@ -222,6 +229,37 @@ _test_sc_nonfatal_mkpath(dirname, subdirname); } +static void test_sc_is_container__empty(void) +{ + g_test_in_ephemeral_dir(); + g_test_queue_destroy((GDestroyNotify) my_unlink, "container"); + g_assert_true(g_file_set_contents("container", "", -1, NULL)); + g_assert_false(_sc_is_in_container("container")); +} + +static void test_sc_is_container__lxc(void) +{ + g_test_in_ephemeral_dir(); + g_test_queue_destroy((GDestroyNotify) my_unlink, "container"); + g_assert_true(g_file_set_contents("container", "lxc", -1, NULL)); + g_assert_true(_sc_is_in_container("container")); +} + +static void test_sc_is_container__lxc_with_newline(void) +{ + g_test_in_ephemeral_dir(); + g_test_queue_destroy((GDestroyNotify) my_unlink, "container"); + g_assert_true(g_file_set_contents("container", "lxc\n", -1, NULL)); + g_assert_true(_sc_is_in_container("container")); +} + +static void test_sc_is_container__no_file(void) +{ + g_test_in_ephemeral_dir(); + g_test_queue_destroy((GDestroyNotify) my_unlink, "container"); + g_assert_false(_sc_is_in_container("container")); +} + static void __attribute__((constructor)) init(void) { g_test_add_func("/utils/parse_bool", test_parse_bool); @@ -232,4 +270,12 @@ test_sc_nonfatal_mkpath__relative); g_test_add_func("/utils/sc_nonfatal_mkpath/absolute", test_sc_nonfatal_mkpath__absolute); + g_test_add_func("/utils/sc_is_in_container/empty", + test_sc_is_container__empty); + g_test_add_func("/utils/sc_is_in_container/no_file", + test_sc_is_container__no_file); + g_test_add_func("/utils/sc_is_in_container/lxc", + test_sc_is_container__lxc); + g_test_add_func("/utils/sc_is_in_container/lxc_newline", + test_sc_is_container__lxc_with_newline); } diff -Nru snapd-2.62+23.10/cmd/libsnap-confine-private/utils.c snapd-2.63+23.10/cmd/libsnap-confine-private/utils.c --- snapd-2.62+23.10/cmd/libsnap-confine-private/utils.c 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/libsnap-confine-private/utils.c 2024-04-24 00:00:39.000000000 +0000 @@ -261,3 +261,44 @@ } return false; } + +const char *run_systemd_container = "/run/systemd/container"; + +static bool _sc_is_in_container(const char *p) +{ + // see what systemd-detect-virt --container does in, see: + // https://github.com/systemd/systemd/blob/5dcd6b1d55a1cfe247621d70f0e25d020de6e0ed/src/basic/virt.c#L749-L755 + // https://systemd.io/CONTAINER_INTERFACE/ + FILE *in SC_CLEANUP(sc_cleanup_file) = fopen(p, "r"); + if (in == NULL) { + return false; + } + + char container[128] = { 0 }; + + if (fgets(container, sizeof(container), in) == NULL) { + /* nothing read or other error? */ + return false; + } + + size_t r = strnlen(container, sizeof container); + // TODO add sc_str_chomp()? + if (r > 0 && container[r - 1] == '\n') { + /* replace trailing newline */ + container[r - 1] = 0; + r--; + } + + if (r == 0) { + /* empty or just a newline */ + return false; + } + + debug("detected container environment: %s", container); + return true; +} + +bool sc_is_in_container(void) +{ + return _sc_is_in_container(run_systemd_container); +} diff -Nru snapd-2.62+23.10/cmd/libsnap-confine-private/utils.h snapd-2.63+23.10/cmd/libsnap-confine-private/utils.h --- snapd-2.62+23.10/cmd/libsnap-confine-private/utils.h 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/libsnap-confine-private/utils.h 2024-04-24 00:00:39.000000000 +0000 @@ -50,6 +50,11 @@ bool sc_is_reexec_enabled(void); /** + * Return true if executing inside a container. 
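sc_is_in_container() above mirrors what systemd-detect-virt --container reports: a process is considered containerized when /run/systemd/container exists and its first line is non-empty. A rough Go equivalent of that check (a sketch for illustration, not snapd code):

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strings"
    )

    // isInContainer reads the first line of the given file; a missing file
    // or an empty line means "not in a container", anything else is taken
    // as the container manager name (e.g. "lxc").
    func isInContainer(path string) bool {
        f, err := os.Open(path)
        if err != nil {
            return false
        }
        defer f.Close()
        line, _ := bufio.NewReader(f).ReadString('\n')
        return strings.TrimSuffix(line, "\n") != ""
    }

    func main() {
        fmt.Println(isInContainer("/run/systemd/container"))
    }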
+ **/ +bool sc_is_in_container(void); + +/** * sc_identity describes the user performing certain operation. * * UID and GID represent user and group accounts numbers and are controlled by diff -Nru snapd-2.62+23.10/cmd/snap/cmd_ack.go snapd-2.63+23.10/cmd/snap/cmd_ack.go --- snapd-2.62+23.10/cmd/snap/cmd_ack.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_ack.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "os" "github.com/jessevdk/go-flags" @@ -60,7 +60,7 @@ } func ackFile(cli *client.Client, assertFile string) error { - assertData, err := ioutil.ReadFile(assertFile) + assertData, err := os.ReadFile(assertFile) if err != nil { return err } diff -Nru snapd-2.62+23.10/cmd/snap/cmd_aliases_test.go snapd-2.63+23.10/cmd/snap/cmd_aliases_test.go --- snapd-2.62+23.10/cmd/snap/cmd_aliases_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_aliases_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,7 @@ package main_test import ( - "io/ioutil" + "io" "net/http" . "gopkg.in/check.v1" @@ -46,7 +46,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/aliases") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -82,7 +82,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/aliases") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -114,7 +114,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/aliases") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -132,7 +132,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/aliases") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ diff -Nru snapd-2.62+23.10/cmd/snap/cmd_auto_import.go snapd-2.63+23.10/cmd/snap/cmd_auto_import.go --- snapd-2.62+23.10/cmd/snap/cmd_auto_import.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_auto_import.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "crypto" "encoding/base64" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -130,7 +129,7 @@ } func autoImportFromSpool(cli *client.Client) (added int, err error) { - files, err := ioutil.ReadDir(dirs.SnapAssertsSpoolDir) + files, err := os.ReadDir(dirs.SnapAssertsSpoolDir) if os.IsNotExist(err) { return 0, nil } @@ -185,10 +184,10 @@ return added, nil } -var ioutilTempDir = ioutil.TempDir +var osMkdirTemp = os.MkdirTemp func tryMount(deviceName string) (string, error) { - tmpMountTarget, err := ioutilTempDir("", "snapd-auto-import-mount-") + tmpMountTarget, err := osMkdirTemp("", "snapd-auto-import-mount-") if err != nil { err = fmt.Errorf("cannot create temporary mount point: %v", err) logger.Noticef("error: %v", err) @@ -275,7 +274,7 @@ return nil } for _, 
removableAttr := range removable { - val, err := ioutil.ReadFile(removableAttr) + val, err := os.ReadFile(removableAttr) if err != nil || string(val) != "1\n" { // non removable continue @@ -294,7 +293,7 @@ } for _, partAttr := range partitionAttrs { - val, err := ioutil.ReadFile(partAttr) + val, err := os.ReadFile(partAttr) if err != nil || string(val) != "1\n" { // non partition? continue diff -Nru snapd-2.62+23.10/cmd/snap/cmd_auto_import_test.go snapd-2.63+23.10/cmd/snap/cmd_auto_import_test.go --- snapd-2.62+23.10/cmd/snap/cmd_auto_import_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_auto_import_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "io" "net/http" "os" "path/filepath" @@ -50,7 +50,7 @@ case 0: c.Check(r.Method, Equals, "POST") c.Check(r.URL.Path, Equals, "/v2/assertions") - postData, err := ioutil.ReadAll(r.Body) + postData, err := io.ReadAll(r.Body) c.Assert(err, IsNil) c.Check(postData, DeepEquals, fakeAssertData) fmt.Fprintln(w, `{"type": "sync", "result": {"ready": true, "status": "Done"}}`) @@ -58,7 +58,7 @@ case 1: c.Check(r.Method, Equals, "POST") c.Check(r.URL.Path, Equals, "/v2/users") - postData, err := ioutil.ReadAll(r.Body) + postData, err := io.ReadAll(r.Body) c.Assert(err, IsNil) c.Check(string(postData), Equals, `{"action":"create","automatic":true}`) @@ -176,7 +176,7 @@ // in the output c.Check(logbuf.String(), Matches, "(?ms).*queuing for later.*\n") - files, err := ioutil.ReadDir(dirs.SnapAssertsSpoolDir) + files, err := os.ReadDir(dirs.SnapAssertsSpoolDir) c.Assert(err, IsNil) c.Check(files, HasLen, 1) c.Check(files[0].Name(), Equals, "iOkaeet50rajLvL-0Qsf2ELrTdn3XIXRIBlDewcK02zwRi3_TJlUOTl9AaiDXmDn.assert") @@ -198,7 +198,7 @@ case 0: c.Check(r.Method, Equals, "POST") c.Check(r.URL.Path, Equals, "/v2/assertions") - postData, err := ioutil.ReadAll(r.Body) + postData, err := io.ReadAll(r.Body) c.Assert(err, IsNil) c.Check(postData, DeepEquals, fakeAssertData) fmt.Fprintln(w, `{"type": "sync", "result": {"ready": true, "status": "Done"}}`) @@ -206,7 +206,7 @@ case 1: c.Check(r.Method, Equals, "POST") c.Check(r.URL.Path, Equals, "/v2/users") - postData, err := ioutil.ReadAll(r.Body) + postData, err := io.ReadAll(r.Body) c.Assert(err, IsNil) c.Check(string(postData), Equals, `{"action":"create","automatic":true}`) @@ -525,7 +525,7 @@ case 0: c.Check(r.Method, Equals, "POST") c.Check(r.URL.Path, Equals, "/v2/assertions") - postData, err := ioutil.ReadAll(r.Body) + postData, err := io.ReadAll(r.Body) c.Assert(err, IsNil) c.Check(postData, DeepEquals, fakeAssertData) fmt.Fprintln(w, `{"type": "sync", "result": {"ready": true, "status": "Done"}}`) @@ -533,7 +533,7 @@ case 1: c.Check(r.Method, Equals, "POST") c.Check(r.URL.Path, Equals, "/v2/users") - postData, err := ioutil.ReadAll(r.Body) + postData, err := io.ReadAll(r.Body) c.Assert(err, IsNil) c.Check(string(postData), Equals, `{"action":"create","automatic":true}`) diff -Nru snapd-2.62+23.10/cmd/snap/cmd_connections_test.go snapd-2.63+23.10/cmd/snap/cmd_connections_test.go --- snapd-2.62+23.10/cmd/snap/cmd_connections_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_connections_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "io" "net/http" "net/url" @@ -38,7 +38,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") c.Check(r.URL.Query(), DeepEquals, query) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) 
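The ioutil replacements in this and the following test files are the mechanical migration away from the io/ioutil package, deprecated since Go 1.16. The mapping used throughout the diff is one-to-one; a compilable reference snippet (not snapd code):

    package main

    import (
        "fmt"
        "io"
        "os"
        "strings"
    )

    func main() {
        // io.ReadAll replaces ioutil.ReadAll
        body, _ := io.ReadAll(strings.NewReader(`{"action":"remove"}`))
        fmt.Println(string(body))

        // os.ReadFile replaces ioutil.ReadFile
        data, _ := os.ReadFile("/proc/version")
        fmt.Println(len(data))

        // os.ReadDir replaces ioutil.ReadDir; it returns []fs.DirEntry
        // instead of []fs.FileInfo, but Name() works on both.
        entries, _ := os.ReadDir("/tmp")
        fmt.Println(len(entries))

        // os.MkdirTemp replaces ioutil.TempDir
        tmp, _ := os.MkdirTemp("", "example-")
        fmt.Println(tmp)
        os.RemoveAll(tmp)
    }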
c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -71,7 +71,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") c.Check(r.URL.Query(), DeepEquals, query) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) fmt.Fprintln(w, `{"type": "error", "result": {"message": "not found", "value": "foo", "kind": "snap-not-found"}, "status-code": 404}`) @@ -99,7 +99,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") c.Check(r.URL.Query(), DeepEquals, query) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -141,7 +141,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") c.Check(r.URL.Query(), DeepEquals, query) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -257,7 +257,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") c.Check(r.URL.Query(), DeepEquals, query) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -360,7 +360,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") c.Check(r.URL.Query(), DeepEquals, query) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -412,7 +412,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") c.Check(r.URL.Query(), DeepEquals, query) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -441,7 +441,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") c.Check(r.URL.Query(), DeepEquals, query) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -615,7 +615,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") c.Check(r.URL.Query(), DeepEquals, query) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -829,7 +829,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") c.Check(r.URL.Query(), DeepEquals, query) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ diff -Nru snapd-2.62+23.10/cmd/snap/cmd_connectivity_check_test.go snapd-2.63+23.10/cmd/snap/cmd_connectivity_check_test.go --- snapd-2.62+23.10/cmd/snap/cmd_connectivity_check_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_connectivity_check_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "io" "net/http" "gopkg.in/check.v1" @@ -37,7 +37,7 @@ c.Check(r.Method, check.Equals, "GET") c.Check(r.URL.Path, check.Equals, 
"/v2/debug") c.Check(r.URL.RawQuery, check.Equals, "aspect=connectivity") - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) c.Check(err, check.IsNil) c.Check(data, check.HasLen, 0) fmt.Fprintln(w, `{"type": "sync", "result": {}}`) @@ -64,7 +64,7 @@ c.Check(r.Method, check.Equals, "GET") c.Check(r.URL.Path, check.Equals, "/v2/debug") c.Check(r.URL.RawQuery, check.Equals, "aspect=connectivity") - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) c.Check(err, check.IsNil) c.Check(data, check.HasLen, 0) fmt.Fprintln(w, `{"type": "sync", "result": {"connectivity":false,"unreachable":["foo.bar.com"]}}`) diff -Nru snapd-2.62+23.10/cmd/snap/cmd_debug_model_test.go snapd-2.63+23.10/cmd/snap/cmd_debug_model_test.go --- snapd-2.62+23.10/cmd/snap/cmd_debug_model_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_debug_model_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "io" "net/http" "gopkg.in/check.v1" @@ -37,7 +37,7 @@ c.Check(r.Method, check.Equals, "GET") c.Check(r.URL.Path, check.Equals, "/v2/debug") c.Check(r.URL.RawQuery, check.Equals, "aspect=model") - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) c.Check(err, check.IsNil) c.Check(string(data), check.Equals, "") fmt.Fprintln(w, `{"type": "sync", "result": {"model": "some-model-json"}}`) diff -Nru snapd-2.62+23.10/cmd/snap/cmd_debug_seeding_test.go snapd-2.63+23.10/cmd/snap/cmd_debug_seeding_test.go --- snapd-2.62+23.10/cmd/snap/cmd_debug_seeding_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_debug_seeding_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "io" "net/http" . "gopkg.in/check.v1" @@ -463,7 +463,7 @@ c.Assert(r.Method, Equals, "GET", comment) c.Assert(r.URL.Path, Equals, "/v2/debug", comment) c.Assert(r.URL.RawQuery, Equals, "aspect=seeding", comment) - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) c.Assert(err, IsNil, comment) c.Assert(string(data), Equals, "", comment) fmt.Fprintln(w, t.jsonResp) diff -Nru snapd-2.62+23.10/cmd/snap/cmd_ensure_state_soon_test.go snapd-2.63+23.10/cmd/snap/cmd_ensure_state_soon_test.go --- snapd-2.62+23.10/cmd/snap/cmd_ensure_state_soon_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_ensure_state_soon_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "io" "net/http" "gopkg.in/check.v1" @@ -37,7 +37,7 @@ c.Check(r.Method, check.Equals, "POST") c.Check(r.URL.Path, check.Equals, "/v2/debug") c.Check(r.URL.RawQuery, check.Equals, "") - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) c.Check(err, check.IsNil) c.Check(data, check.DeepEquals, []byte(`{"action":"ensure-state-soon"}`)) fmt.Fprintln(w, `{"type": "sync", "result": true}`) diff -Nru snapd-2.62+23.10/cmd/snap/cmd_get_base_declaration_test.go snapd-2.63+23.10/cmd/snap/cmd_get_base_declaration_test.go --- snapd-2.62+23.10/cmd/snap/cmd_get_base_declaration_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_get_base_declaration_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "io" "net/http" "gopkg.in/check.v1" @@ -37,7 +37,7 @@ c.Check(r.Method, check.Equals, "GET") c.Check(r.URL.Path, check.Equals, "/v2/debug") c.Check(r.URL.RawQuery, check.Equals, "aspect=base-declaration") - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) 
c.Check(err, check.IsNil) c.Check(data, check.HasLen, 0) fmt.Fprintln(w, `{"type": "sync", "result": {"base-declaration": "hello"}}`) @@ -62,7 +62,7 @@ c.Check(r.Method, check.Equals, "GET") c.Check(r.URL.Path, check.Equals, "/v2/debug") c.Check(r.URL.RawQuery, check.Equals, "aspect=base-declaration") - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) c.Check(err, check.IsNil) c.Check(data, check.HasLen, 0) fmt.Fprintln(w, `{"type": "sync", "result": {"base-declaration": "hello"}}`) diff -Nru snapd-2.62+23.10/cmd/snap/cmd_interface_test.go snapd-2.63+23.10/cmd/snap/cmd_interface_test.go --- snapd-2.62+23.10/cmd/snap/cmd_interface_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_interface_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,7 @@ package main_test import ( - "io/ioutil" + "io" "net/http" "os" @@ -55,7 +55,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/interfaces") c.Check(r.URL.RawQuery, Equals, "select=connected") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -75,7 +75,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/interfaces") c.Check(r.URL.RawQuery, Equals, "select=all") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -95,7 +95,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/interfaces") c.Check(r.URL.RawQuery, Equals, "select=connected") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -125,7 +125,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/interfaces") c.Check(r.URL.RawQuery, Equals, "select=all") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -159,7 +159,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/interfaces") c.Check(r.URL.RawQuery, Equals, "doc=true&names=network&plugs=true&select=all&slots=true") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -197,7 +197,7 @@ c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/interfaces") c.Check(r.URL.RawQuery, Equals, "doc=true&names=serial-port&plugs=true&select=all&slots=true") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ diff -Nru snapd-2.62+23.10/cmd/snap/cmd_interfaces_test.go snapd-2.63+23.10/cmd/snap/cmd_interfaces_test.go --- snapd-2.62+23.10/cmd/snap/cmd_interfaces_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_interfaces_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,7 @@ package main_test import ( - "io/ioutil" + "io" "net/http" "os" @@ -36,7 +36,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, 
DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -65,7 +65,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -96,7 +96,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -162,7 +162,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -203,7 +203,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -270,7 +270,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -337,7 +337,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -386,7 +386,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -430,7 +430,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -485,7 +485,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -528,7 +528,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) 
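All of the hunks in this file share one shape: the suite redirects the client to a local test server, asserts on the incoming request, and writes a canned snapd-style response. RedirectClientToTestServer and EncodeResponseBody are snapd test helpers; the generic pattern with only the standard library looks roughly like this:

    package main

    import (
        "encoding/json"
        "fmt"
        "io"
        "net/http"
        "net/http/httptest"
    )

    func main() {
        srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            // Assert on the request, as the checks above do.
            body, _ := io.ReadAll(r.Body)
            fmt.Printf("%s %s body=%q\n", r.Method, r.URL.Path, body)
            // Reply with a canned snapd-style sync response.
            json.NewEncoder(w).Encode(map[string]interface{}{
                "type":   "sync",
                "result": map[string]interface{}{},
            })
        }))
        defer srv.Close()

        resp, _ := http.Get(srv.URL + "/v2/connections")
        reply, _ := io.ReadAll(resp.Body)
        resp.Body.Close()
        fmt.Println(string(reply))
    }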
EncodeResponseBody(c, w, map[string]interface{}{ @@ -549,7 +549,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ @@ -648,7 +648,7 @@ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { c.Check(r.Method, Equals, "GET") c.Check(r.URL.Path, Equals, "/v2/connections") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(body, DeepEquals, []byte{}) EncodeResponseBody(c, w, map[string]interface{}{ diff -Nru snapd-2.62+23.10/cmd/snap/cmd_keys_test.go snapd-2.63+23.10/cmd/snap/cmd_keys_test.go --- snapd-2.62+23.10/cmd/snap/cmd_keys_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_keys_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "encoding/json" "fmt" - "io/ioutil" "net/url" "os" "path/filepath" @@ -70,7 +69,7 @@ s.tempdir = c.MkDir() for _, fileName := range []string{"pubring.gpg", "secring.gpg", "trustdb.gpg"} { - data, err := ioutil.ReadFile(filepath.Join("test-data", fileName)) + data, err := os.ReadFile(filepath.Join("test-data", fileName)) c.Assert(err, IsNil) err = os.WriteFile(filepath.Join(s.tempdir, fileName), data, 0644) c.Assert(err, IsNil) diff -Nru snapd-2.62+23.10/cmd/snap/cmd_login_test.go snapd-2.63+23.10/cmd/snap/cmd_login_test.go --- snapd-2.62+23.10/cmd/snap/cmd_login_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_login_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "io" "net/http" . 
"gopkg.in/check.v1" @@ -37,7 +37,7 @@ case 0: c.Check(r.URL.Path, Equals, "/v2/login") c.Check(r.Method, Equals, "POST") - postData, err := ioutil.ReadAll(r.Body) + postData, err := io.ReadAll(r.Body) c.Assert(err, IsNil) c.Check(string(postData), Equals, `{"email":"foo@example.com","password":"some-password"}`+"\n") fmt.Fprintln(w, mockLoginRsp) diff -Nru snapd-2.62+23.10/cmd/snap/cmd_quota_test.go snapd-2.63+23.10/cmd/snap/cmd_quota_test.go --- snapd-2.62+23.10/cmd/snap/cmd_quota_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_quota_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,7 @@ "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "strings" @@ -142,7 +142,7 @@ c.Check(r.URL.Path, check.Equals, "/v2/quotas") c.Check(r.Method, check.Equals, "POST") - buf, err := ioutil.ReadAll(r.Body) + buf, err := io.ReadAll(r.Body) c.Assert(err, check.IsNil) switch opts.action { diff -Nru snapd-2.62+23.10/cmd/snap/cmd_reboot_test.go snapd-2.63+23.10/cmd/snap/cmd_reboot_test.go --- snapd-2.62+23.10/cmd/snap/cmd_reboot_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_reboot_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "io" "net/http" "strings" @@ -107,7 +107,7 @@ c.Check(r.Method, Equals, "POST") c.Check(r.URL.Path, Equals, tc.expectedEndpoint, Commentf("%v", tc.cmdline)) c.Check(r.URL.RawQuery, Equals, "") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(string(body), Equals, tc.expectedJSON+"\n") fmt.Fprintln(w, `{"type": "sync", "result": {}}`) diff -Nru snapd-2.62+23.10/cmd/snap/cmd_run.go snapd-2.63+23.10/cmd/snap/cmd_run.go --- snapd-2.62+23.10/cmd/snap/cmd_run.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_run.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,6 +1,6 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2014-2022 Canonical Ltd + * Copyright (C) 2014-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -25,7 +25,6 @@ "errors" "fmt" "io" - "io/ioutil" "net" "os" "os/exec" @@ -41,6 +40,7 @@ "github.com/jessevdk/go-flags" "github.com/snapcore/snapd/client" + "github.com/snapcore/snapd/cmd/snaplock/runinhibit" "github.com/snapcore/snapd/desktop/portal" "github.com/snapcore/snapd/dirs" "github.com/snapcore/snapd/features" @@ -260,16 +260,6 @@ return x.snapRunApp(snapApp, args) } -func maybeWaitWhileInhibited(ctx context.Context, snapName string) error { - // If the snap is inhibited from being used then postpone running it until - // that condition passes. Inhibition UI can be dismissed by the user, in - // which case we don't run the application at all. - if features.RefreshAppAwareness.IsEnabled() { - return waitWhileInhibited(ctx, snapName) - } - return nil -} - // antialias changes snapApp and args if snapApp is actually an alias // for something else. If not, or if the args aren't what's expected // for completion, it returns them unchanged. @@ -493,30 +483,97 @@ return opts, raw, nil } +// isSnapRefreshConflictDetected detects if snap refreshed was started while not +// holding the inhibition hint file lock. +// +// For context, on snap first install, the inhibition hint lock file is not created +// so we cannot hold it. It is created after the first refresh. 
This allows for a +// window where we don't hold the lock before the tracking cgroup is created where +// a snap refresh could start. +func isSnapRefreshConflictDetected(app *snap.AppInfo, hintFlock *osutil.FileLock) bool { + if !features.RefreshAppAwareness.IsEnabled() || app.IsService() || hintFlock != nil { + // Skip check + return false + } + + // We started without a hint lock file, if it exists now this means that a + // refresh was started. + return osutil.FileExists(runinhibit.HintFile(app.Snap.InstanceName())) +} + func (x *cmdRun) snapRunApp(snapApp string, args []string) error { if x.DebugLog { os.Setenv("SNAPD_DEBUG", "1") logger.Debugf("enabled debug logging of early snap startup") } snapName, appName := snap.SplitSnapApp(snapApp) - info, err := getSnapInfo(snapName, snap.R(0)) - if err != nil { - return err - } - app := info.Apps[appName] - if app == nil { - return fmt.Errorf(i18n.G("cannot find app %q in %q"), appName, snapName) - } + var retryCnt int + for { + if retryCnt > 1 { + // This should never happen, but it is better to fail instead + // of retrying forever. + return fmt.Errorf("race condition detected, snap-run can only retry once") + } + + info, app, hintFlock, err := maybeWaitWhileInhibited(context.Background(), x.client, snapName, appName) + if errors.Is(err, errSnapRefreshConflict) { + // Possible race condition detected, let's retry. + + // This will not retry infinitely because this can only be caused by + // a missing inhibition hint initially which is now created due to a + // refresh. + retryCnt++ + logger.Debugf("retry due to possible snap refresh conflict detected") + continue + } + if err != nil { + return err + } + + closeFlockOrRetry := func() error { + // This needs to run inside the transient cgroup created for a snap + // such that any pending refresh of the snap will get blocked after + // we release the lock. + if hintFlock != nil { + // It is okay to release the lock here (beforeExec) because snapd unless forced + // will not inhibit the snap and do a refresh anymore because it detects app + // processes are running for via the established transient cgroup cgroup. + + // Note: We cannot rely on O_CLOEXEC to unlock because might run in + // fork + exec mode like when running under gdb or strace. + hintFlock.Close() + return nil + } + // hintFlock might be nil if the hint file did not exist + if isSnapRefreshConflictDetected(app, hintFlock) { + return errSnapRefreshConflict + } + return nil + } - if !app.IsService() { - // TODO: use signal.NotifyContext as context when snap-run flow is finalized - if err := maybeWaitWhileInhibited(context.Background(), snapName); err != nil { + err = x.runSnapConfine(info, app.SecurityTag(), snapApp, "", closeFlockOrRetry, args) + if errors.Is(err, errSnapRefreshConflict) { + // Possible race condition detected, let's retry. + // + // This will not retry infinitely because this can only be caused by + // a missing inhibition hint initially which is now created due to a + // refresh. + retryCnt++ + logger.Debugf("retry due to possible snap refresh conflict detected") + continue + } + if err != nil { + // Make sure we release the lock in case runSnapConfine fails before + // closing hint lock file, it is fine if we double close. 
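The lock being closed here is a shared flock on the per-snap inhibition hint file: snap run holds it while setting the app up, which keeps snapd from inhibiting the snap for a refresh mid-launch, and it can be dropped once the transient tracking cgroup exists. A stripped-down sketch of that pattern with a plain POSIX flock (snapd actually uses its osutil.FileLock wrapper, and the hint path shown is only illustrative):

    package main

    import (
        "fmt"
        "os"
        "syscall"
    )

    // withSharedHintLock holds a shared (read) lock on the hint file while
    // startApp runs; the refresh side takes an exclusive lock before writing
    // an inhibition hint, so it must wait for all shared holders to drop out.
    func withSharedHintLock(hintPath string, startApp func() error) error {
        f, err := os.Open(hintPath)
        if os.IsNotExist(err) {
            // First install: no hint file exists yet, nothing to hold.
            return startApp()
        }
        if err != nil {
            return err
        }
        defer f.Close()
        if err := syscall.Flock(int(f.Fd()), syscall.LOCK_SH); err != nil {
            return err
        }
        // Unlocking twice is harmless, matching the "double close" note above.
        defer syscall.Flock(int(f.Fd()), syscall.LOCK_UN)
        return startApp()
    }

    func main() {
        err := withSharedHintLock("/var/lib/snapd/inhibit/snapname.lock", func() error {
            fmt.Println("app starting while a refresh is held off")
            return nil
        })
        fmt.Println(err)
    }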
+ if hintFlock != nil { + hintFlock.Close() + } return err } - } - return x.runSnapConfine(info, app.SecurityTag(), snapApp, "", args) + return nil + } } func (x *cmdRun) snapRunHook(snapName string) error { @@ -535,7 +592,7 @@ return fmt.Errorf(i18n.G("cannot find hook %q in %q"), x.HookName, snapName) } - return x.runSnapConfine(info, hook.SecurityTag(), snapName, hook.Name, nil) + return x.runSnapConfine(info, hook.SecurityTag(), snapName, hook.Name, nil, nil) } func (x *cmdRun) snapRunTimer(snapApp, timer string, args []string) error { @@ -894,7 +951,7 @@ func (x *cmdRun) runCmdWithTraceExec(origCmd []string, envForExec envForExecFunc) error { // setup private tmp dir with strace fifo - straceTmp, err := ioutil.TempDir("", "exec-trace") + straceTmp, err := os.MkdirTemp("", "exec-trace") if err != nil { return err } @@ -1047,7 +1104,7 @@ return err } -func (x *cmdRun) runSnapConfine(info *snap.Info, securityTag, snapApp, hook string, args []string) error { +func (x *cmdRun) runSnapConfine(info *snap.Info, securityTag, snapApp, hook string, beforeExec func() error, args []string) error { snapConfine, err := snapdHelperPath("snap-confine") if err != nil { return err @@ -1062,7 +1119,7 @@ logger.Debugf("executing snap-confine from %s", snapConfine) - snapName, _ := snap.SplitSnapApp(snapApp) + snapName, appName := snap.SplitSnapApp(snapApp) opts, err := getSnapDirOptions(snapName) if err != nil { return fmt.Errorf("cannot get snap dir options: %w", err) @@ -1210,7 +1267,6 @@ // // For more information about systemd cgroups, including unit types, see: // https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/ - _, appName := snap.SplitSnapApp(snapApp) needsTracking := true if app := info.Apps[appName]; hook == "" && app != nil && app.IsService() { // If we are running a service app then we do not need to use @@ -1238,6 +1294,17 @@ // Allow using the session bus for all apps but not for hooks. allowSessionBus := hook == "" // Track, or confirm existing tracking from systemd. + if err := cgroupConfirmSystemdAppTracking(securityTag); err != nil { + if err != cgroup.ErrCannotTrackProcess { + return err + } + } else { + // A transient scope was already created in a previous attempt. Skip creating + // another transient scope to avoid leaking cgroups. + // + // Note: This could happen if beforeExec fails and triggers a retry. 
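The tracking decision that follows this note boils down to confirm-or-create: if the process is already confirmed to sit in a snap app scope (for instance because a previous attempt created one before beforeExec failed), reuse it; only when cgroup.ErrCannotTrackProcess comes back is a new transient scope created. A condensed sketch using the cgroup helpers referenced in this file (illustrative only):

    package main

    import (
        "fmt"

        "github.com/snapcore/snapd/sandbox/cgroup"
    )

    // ensureTracking condenses the confirm-or-create logic: reuse an existing
    // transient scope when the process is already tracked, otherwise create
    // one; any error other than ErrCannotTrackProcess is fatal.
    func ensureTracking(securityTag string, allowSessionBus bool) error {
        err := cgroup.ConfirmSystemdAppTracking(securityTag)
        if err == nil {
            // Already tracked from an earlier attempt; creating another
            // scope here would leak cgroups.
            return nil
        }
        if err != cgroup.ErrCannotTrackProcess {
            return err
        }
        opts := &cgroup.TrackingOptions{AllowSessionBus: allowSessionBus}
        return cgroup.CreateTransientScopeForTracking(securityTag, opts)
    }

    func main() {
        // Security tags have the form snap.<snap>.<app>.
        if err := ensureTracking("snap.snapname.app", true); err != nil {
            fmt.Println(err)
        }
    }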
+ needsTracking = false + } if needsTracking { opts := &cgroup.TrackingOptions{AllowSessionBus: allowSessionBus} if err = cgroupCreateTransientScopeForTracking(securityTag, opts); err != nil { @@ -1251,6 +1318,13 @@ logger.Debugf("snap refreshes will not be postponed by this process") } } + + if beforeExec != nil { + if err := beforeExec(); err != nil { + return err + } + } + logger.StartupStageTimestamp("snap to snap-confine") if x.TraceExec { return x.runCmdWithTraceExec(cmd, envForExec) @@ -1278,7 +1352,7 @@ func getSnapDirOptions(snap string) (*dirs.SnapDirOptions, error) { var opts dirs.SnapDirOptions - data, err := ioutil.ReadFile(filepath.Join(dirs.SnapSeqDir, snap+".json")) + data, err := os.ReadFile(filepath.Join(dirs.SnapSeqDir, snap+".json")) if errors.Is(err, os.ErrNotExist) { return &opts, nil } else if err != nil { @@ -1301,3 +1375,4 @@ var cgroupCreateTransientScopeForTracking = cgroup.CreateTransientScopeForTracking var cgroupConfirmSystemdServiceTracking = cgroup.ConfirmSystemdServiceTracking +var cgroupConfirmSystemdAppTracking = cgroup.ConfirmSystemdAppTracking diff -Nru snapd-2.62+23.10/cmd/snap/cmd_run_test.go snapd-2.63+23.10/cmd/snap/cmd_run_test.go --- snapd-2.62+23.10/cmd/snap/cmd_run_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_run_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2016-2022 Canonical Ltd + * Copyright (C) 2016-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -33,24 +33,18 @@ "gopkg.in/check.v1" - "github.com/godbus/dbus" snaprun "github.com/snapcore/snapd/cmd/snap" "github.com/snapcore/snapd/cmd/snaplock/runinhibit" - "github.com/snapcore/snapd/dbusutil" - "github.com/snapcore/snapd/dbusutil/dbustest" "github.com/snapcore/snapd/dirs" "github.com/snapcore/snapd/features" "github.com/snapcore/snapd/logger" "github.com/snapcore/snapd/osutil" "github.com/snapcore/snapd/osutil/strace" - "github.com/snapcore/snapd/progress" - "github.com/snapcore/snapd/progress/progresstest" "github.com/snapcore/snapd/sandbox/cgroup" "github.com/snapcore/snapd/sandbox/selinux" "github.com/snapcore/snapd/snap" "github.com/snapcore/snapd/snap/snaptest" "github.com/snapcore/snapd/testutil" - usersessionclient "github.com/snapcore/snapd/usersession/client" "github.com/snapcore/snapd/x11" ) @@ -100,8 +94,28 @@ s.AddCleanup(snaprun.MockCreateTransientScopeForTracking(func(string, *cgroup.TrackingOptions) error { return nil })) - restoreIsGraphicalSession := snaprun.MockIsGraphicalSession(false) - s.AddCleanup(restoreIsGraphicalSession) + s.AddCleanup(snaprun.MockConfirmSystemdAppTracking(func(securityTag string) error { + // default to showing no existing tracking + return cgroup.ErrCannotTrackProcess + })) + // Mock notices/connections api calls + s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/v2/notices": + c.Assert(r.Method, check.Equals, "POST") + EncodeResponseBody(c, w, map[string]any{ + "type": "sync", + "result": map[string]string{"id": "1"}, + }) + case "/v2/connections": + EncodeResponseBody(c, w, map[string]any{ + "type": "sync", + "result": nil, + }) + default: + c.Error("this should never be reached") + } + }) } func (s *RunSuite) TestInvalidParameters(c *check.C) { @@ -218,6 +232,20 @@ c.Check(execEnv, testutil.Contains, fmt.Sprintf("TMPDIR=%s", tmpdir)) } +func checkHintFileNotLocked(c 
*check.C, snapName string) { + flock, err := openHintFileLock(snapName) + c.Assert(err, check.IsNil) + c.Check(flock.TryLock(), check.IsNil) + flock.Close() +} + +func checkHintFileLocked(c *check.C, snapName string) { + flock, err := openHintFileLock(snapName) + c.Assert(err, check.IsNil) + c.Check(flock.TryLock(), check.Equals, osutil.ErrAlreadyLocked) + flock.Close() +} + func (s *RunSuite) TestSnapRunAppRunsChecksInhibitionLock(c *check.C) { defer mockSnapConfine(dirs.DistroLibExecDir)() @@ -227,6 +255,9 @@ var execArg0 string var execArgs []string restorer := snaprun.MockSyscallExec(func(arg0 string, args []string, envv []string) error { + // lock should be released before calling snap-confine using beforeExec() callback + checkHintFileNotLocked(c, "snapname") + execArg0 = arg0 execArgs = args return nil @@ -244,16 +275,15 @@ c.Check(snapName, check.Equals, "snapname") c.Check(ctx, check.NotNil) - cont, err := inhibited(ctx, runinhibit.HintInhibitedForRefresh, nil) + var err error + flock, err = openHintFileLock(snapName) c.Assert(err, check.IsNil) - // non-service apps should keep waiting - c.Check(cont, check.Equals, false) - if notInhibited != nil { - c.Errorf("this should never be reached") - } + // mock held lock and check that it is released after snap run finishes + c.Assert(flock.ReadLock(), check.IsNil) - flock, err = openHintFileLock(snapName) + err = notInhibited(ctx) c.Assert(err, check.IsNil) + return flock, nil }) defer restore() @@ -268,6 +298,144 @@ "snap.snapname.app", filepath.Join(dirs.CoreLibExecDir, "snap-exec"), "snapname.app", "--arg1"}) + + // lock should be released now + checkHintFileNotLocked(c, "snapname") +} + +func (s *RunSuite) TestSnapRunAppRefreshAppAwarenessUnsetSkipsInhibitionLockCheck(c *check.C) { + defer mockSnapConfine(dirs.DistroLibExecDir)() + + // mock installed snap + snaptest.MockSnapCurrent(c, string(mockYaml), &snap.SideInfo{Revision: snap.R("x2")}) + + restorer := snaprun.MockSyscallExec(func(arg0 string, args []string, envv []string) error { + return nil + }) + defer restorer() + + // mark snap as inhibited + inhibitInfo := runinhibit.InhibitInfo{Previous: snap.R("x2")} + c.Assert(runinhibit.LockWithHint("snapname", runinhibit.HintInhibitedForRefresh, inhibitInfo), check.IsNil) + c.Assert(os.MkdirAll(dirs.FeaturesDir, 0755), check.IsNil) + // unset refresh-app-awareness flag + c.Assert(os.RemoveAll(features.RefreshAppAwareness.ControlFile()), check.IsNil) + + restore := snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { + return nil, fmt.Errorf("runinhibit.WaitWhileInhibited should not have been called") + }) + defer restore() + + _, err := snaprun.Parser(snaprun.Client()).ParseArgs([]string{"run", "--", "snapname.app", "--arg1"}) + c.Assert(err, check.IsNil) +} + +func (s *RunSuite) TestSnapRunAppNewRevisionAfterInhibition(c *check.C) { + defer mockSnapConfine(dirs.DistroLibExecDir)() + + // mock installed snap + snaptest.MockSnap(c, string(mockYaml), &snap.SideInfo{Revision: snap.R("x2")}) + + var execEnv []string + restorer := snaprun.MockSyscallExec(func(arg0 string, args []string, envv []string) error { + execEnv = envv + return nil + }) + defer restorer() + + // mark snap as inhibited + inhibitInfo := runinhibit.InhibitInfo{Previous: snap.R("x2")} + 
c.Assert(runinhibit.LockWithHint("snapname", runinhibit.HintInhibitedForRefresh, inhibitInfo), check.IsNil) + // unset refresh-app-awareness flag + c.Assert(os.MkdirAll(dirs.FeaturesDir, 0755), check.IsNil) + c.Assert(os.WriteFile(features.RefreshAppAwareness.ControlFile(), []byte(nil), 0644), check.IsNil) + + var called bool + restore := snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { + called = true + c.Check(snapName, check.Equals, "snapname") + + var err error + flock, err = openHintFileLock(snapName) + c.Assert(err, check.IsNil) + c.Assert(flock.ReadLock(), check.IsNil) + + // snap is inhibited for sometime + for i := 0; i < 3; i++ { + cont, err := inhibited(ctx, runinhibit.HintInhibitedForRefresh, &runinhibit.InhibitInfo{Previous: snap.R("x2")}) + c.Assert(err, check.IsNil) + // non-service apps should keep waiting + c.Check(cont, check.Equals, false) + } + + // mock installed snap's new revision with current symlink + snaptest.MockSnapCurrent(c, string(mockYaml), &snap.SideInfo{Revision: snap.R("x3")}) + + // snap is not inhibited anymore + err = notInhibited(ctx) + c.Assert(err, check.IsNil) + + return flock, nil + }) + defer restore() + + rest, err := snaprun.Parser(snaprun.Client()).ParseArgs([]string{"run", "--", "snapname.app", "--arg1"}) + c.Assert(err, check.IsNil) + c.Check(called, check.Equals, true) + c.Assert(rest, check.DeepEquals, []string{"snapname.app", "--arg1"}) + // Check snap-confine points to latest revision + c.Check(execEnv, testutil.Contains, "SNAP_REVISION=x3") + + // lock should be released now + checkHintFileNotLocked(c, "snapname") +} + +func (s *RunSuite) TestSnapRunAppMissingAppAfterInhibition(c *check.C) { + defer mockSnapConfine(dirs.DistroLibExecDir)() + + const mockYaml1 = `name: snapname +version: 1.0 +apps: + app-1: + command: run-app +` + const mockYaml2 = `name: snapname +version: 1.1 +apps: + app-2: + command: run-app +` + + // mock installed snap + snaptest.MockSnap(c, string(mockYaml1), &snap.SideInfo{Revision: snap.R("x2")}) + + c.Assert(os.MkdirAll(dirs.FeaturesDir, 0755), check.IsNil) + c.Assert(os.WriteFile(features.RefreshAppAwareness.ControlFile(), []byte(nil), 0644), check.IsNil) + + var called bool + restore := snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { + called = true + c.Check(snapName, check.Equals, "snapname") + + // snap is inhibited + cont, err := inhibited(ctx, runinhibit.HintInhibitedForRefresh, &runinhibit.InhibitInfo{Previous: snap.R("x2")}) + c.Assert(err, check.IsNil) + // non-service apps should keep waiting + c.Check(cont, check.Equals, false) + + // mock installed snap's new revision with current symlink + snaptest.MockSnapCurrent(c, string(mockYaml2), &snap.SideInfo{Revision: snap.R("x3")}) + + // snap is not inhibited anymore + err = notInhibited(ctx) + c.Assert(err, check.ErrorMatches, `cannot find app "app-1" in "snapname"`) + return nil, err + }) + defer restore() + + _, err := snaprun.Parser(snaprun.Client()).ParseArgs([]string{"run", "--", "snapname.app-1", "--arg1"}) + c.Assert(err, check.ErrorMatches, 
`cannot find app "app-1" in "snapname"`) + c.Check(called, check.Equals, true) } func (s *RunSuite) TestSnapRunHookNoRuninhibit(c *check.C) { @@ -290,11 +458,8 @@ }) defer restorer() - var called bool restore := snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { - called = true - c.Errorf("runinhibit.WaitWhileInhibited should not have been called") - return nil, nil + return nil, fmt.Errorf("runinhibit.WaitWhileInhibited should not have been called") }) defer restore() @@ -313,7 +478,6 @@ filepath.Join(dirs.CoreLibExecDir, "snap-exec"), "--hook=configure", "snapname"}) c.Check(execEnv, testutil.Contains, "SNAP_REVISION=42") - c.Check(called, check.Equals, false) } func (s *RunSuite) TestSnapRunAppRuninhibitSkipsServices(c *check.C) { @@ -324,9 +488,11 @@ var execArg0 string var execArgs []string + var execEnv []string restorer := snaprun.MockSyscallExec(func(arg0 string, args []string, envv []string) error { execArg0 = arg0 execArgs = args + execEnv = envv return nil }) defer restorer() @@ -336,11 +502,23 @@ c.Assert(os.MkdirAll(dirs.FeaturesDir, 0755), check.IsNil) c.Assert(os.WriteFile(features.RefreshAppAwareness.ControlFile(), []byte(nil), 0644), check.IsNil) - var called bool + var called int restore := snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { - called = true - c.Errorf("runinhibit.WaitWhileInhibited should not have been called") - return nil, nil + called++ + c.Check(snapName, check.Equals, "snapname") + + var err error + flock, err = openHintFileLock(snapName) + c.Assert(err, check.IsNil) + c.Assert(flock.ReadLock(), check.IsNil) + + // snap is inhibited + cont, err := inhibited(ctx, runinhibit.HintInhibitedForRefresh, &inhibitInfo) + c.Assert(err, check.IsNil) + // services should not be blocked waiting + c.Check(cont, check.Equals, true) + + return flock, nil }) defer restore() @@ -352,12 +530,417 @@ rest, err := snaprun.Parser(snaprun.Client()).ParseArgs([]string{"run", "--", "snapname.svc"}) c.Assert(err, check.IsNil) - c.Check(called, check.Equals, false) + c.Check(called, check.Equals, 1) c.Assert(rest, check.DeepEquals, []string{"snapname.svc"}) c.Check(execArg0, check.Equals, filepath.Join(dirs.DistroLibExecDir, "snap-confine")) c.Check(execArgs, check.DeepEquals, []string{ filepath.Join(dirs.DistroLibExecDir, "snap-confine"), "snap.snapname.svc", filepath.Join(dirs.CoreLibExecDir, "snap-exec"), "snapname.svc"}) + c.Check(execEnv, testutil.Contains, "SNAP_REVISION=x2") + + // lock should be released now + checkHintFileNotLocked(c, "snapname") +} + +func (s *RunSuite) TestSnapRunAppHintUnlockedOnSnapConfineFailure(c *check.C) { + defer mockSnapConfine(dirs.DistroLibExecDir)() + + // mock installed snap + snaptest.MockSnapCurrent(c, string(mockYaml), &snap.SideInfo{Revision: snap.R("x2")}) + + // mock not-inhibited empty hint + c.Assert(os.MkdirAll(runinhibit.InhibitDir, 0755), check.IsNil) + c.Assert(os.WriteFile(runinhibit.HintFile("snapname"), []byte(""), 0644), check.IsNil) + + c.Assert(os.MkdirAll(dirs.FeaturesDir, 0755), check.IsNil) + 
c.Assert(os.WriteFile(features.RefreshAppAwareness.ControlFile(), []byte(nil), 0644), check.IsNil) + + inhibitionFlow := fakeInhibitionFlow{ + start: func(ctx context.Context) error { + return fmt.Errorf("this should never be reached") + }, + finish: func(ctx context.Context) error { + return fmt.Errorf("this should never be reached") + }, + } + restore := snaprun.MockInhibitionFlow(&inhibitionFlow) + defer restore() + + var confirmCgroupCalled int + restore = snaprun.MockConfirmSystemdAppTracking(func(securityTag string) error { + confirmCgroupCalled++ + // force error before beforeExec is called + return fmt.Errorf("boom") + }) + defer restore() + + _, err := snaprun.Parser(snaprun.Client()).ParseArgs([]string{"run", "--", "snapname.app", "--arg1"}) + c.Assert(err, check.ErrorMatches, "boom") + c.Check(confirmCgroupCalled, check.Equals, 1) + + // lock should be released on failure + checkHintFileNotLocked(c, "snapname") +} + +func (s *RunSuite) TestSnapRunAppHintLockedUntilTrackingCgroupIsCreated(c *check.C) { + defer mockSnapConfine(dirs.DistroLibExecDir)() + + var execArg0 string + var execArgs []string + restore := snaprun.MockSyscallExec(func(arg0 string, args []string, envv []string) error { + execArg0 = arg0 + execArgs = args + return nil + }) + defer restore() + + // mock installed snap + snaptest.MockSnapCurrent(c, string(mockYaml), &snap.SideInfo{Revision: snap.R("x2")}) + + // mock not-inhibited empty hint + c.Assert(os.MkdirAll(runinhibit.InhibitDir, 0755), check.IsNil) + c.Assert(os.WriteFile(runinhibit.HintFile("snapname"), []byte(""), 0644), check.IsNil) + + c.Assert(os.MkdirAll(dirs.FeaturesDir, 0755), check.IsNil) + c.Assert(os.WriteFile(features.RefreshAppAwareness.ControlFile(), []byte(nil), 0644), check.IsNil) + + inhibitionFlow := fakeInhibitionFlow{ + start: func(ctx context.Context) error { + return fmt.Errorf("this should never be reached") + }, + finish: func(ctx context.Context) error { + return fmt.Errorf("this should never be reached") + }, + } + restore = snaprun.MockInhibitionFlow(&inhibitionFlow) + defer restore() + + var confirmCgroupCalled int + restore = snaprun.MockConfirmSystemdAppTracking(func(securityTag string) error { + confirmCgroupCalled++ + // hint file must be locked until transient cgroup is created + checkHintFileLocked(c, "snapname") + return nil + }) + defer restore() + + rest, err := snaprun.Parser(snaprun.Client()).ParseArgs([]string{"run", "--", "snapname.app", "--arg1"}) + c.Assert(err, check.IsNil) + c.Assert(rest, check.DeepEquals, []string{"snapname.app", "--arg1"}) + c.Check(execArg0, check.Equals, filepath.Join(dirs.DistroLibExecDir, "snap-confine")) + c.Check(execArgs, check.DeepEquals, []string{ + filepath.Join(dirs.DistroLibExecDir, "snap-confine"), + "snap.snapname.app", + filepath.Join(dirs.CoreLibExecDir, "snap-exec"), + "snapname.app", "--arg1"}) + c.Check(confirmCgroupCalled, check.Equals, 1) + + // lock should be released on failure + checkHintFileNotLocked(c, "snapname") +} + +func (s *RunSuite) testSnapRunAppRetryNoInhibitHintFileThenOngoingRefresh(c *check.C, svc bool) { + logbuf, restore := logger.MockLogger() + defer restore() + + defer mockSnapConfine(dirs.DistroLibExecDir)() + + var execEnv []string + restore = snaprun.MockSyscallExec(func(arg0 string, args []string, envv []string) error { + execEnv = envv + return nil + }) + defer restore() + + // mock installed snap + si := snaptest.MockSnapCurrent(c, string(mockYaml), &snap.SideInfo{Revision: snap.R("x2")}) + + c.Assert(os.MkdirAll(dirs.FeaturesDir, 0755), 
check.IsNil) + c.Assert(os.WriteFile(features.RefreshAppAwareness.ControlFile(), []byte(nil), 0644), check.IsNil) + + var startCalled, finishCalled int + inhibitionFlow := fakeInhibitionFlow{ + start: func(ctx context.Context) error { + startCalled++ + return nil + }, + finish: func(ctx context.Context) error { + finishCalled++ + return nil + }, + } + restore = snaprun.MockInhibitionFlow(&inhibitionFlow) + defer restore() + + var waitWhileInhibitedCalled int + restore = snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { + waitWhileInhibitedCalled++ + + c.Check(snapName, check.Equals, "snapname") + if waitWhileInhibitedCalled == 1 { + err := notInhibited(ctx) + c.Assert(err, check.IsNil) + + // mock snap inhibited to trigger race condition detection + // i.e. we started without a hint lock file (snap on first install) + // then a refresh started which created the hint lock file. + c.Assert(runinhibit.LockWithHint("snapname", runinhibit.HintInhibitedForRefresh, runinhibit.InhibitInfo{Previous: snap.R("x2")}), check.IsNil) + + // nil FileLock means no inhibit file exists + return nil, nil + } else { + var err error + + flock, err = openHintFileLock(snapName) + c.Assert(err, check.IsNil) + c.Assert(flock.ReadLock(), check.IsNil) + + // snap is inhibited + cont, err := inhibited(ctx, runinhibit.HintInhibitedForRefresh, &runinhibit.InhibitInfo{Previous: snap.R("x2")}) + c.Check(err, check.IsNil) + c.Check(cont, check.Equals, false) + + // remove current symlink to add another "current" revision + c.Assert(os.RemoveAll(filepath.Join(si.MountDir(), "../current")), check.IsNil) + // update current snap revision + snaptest.MockSnapCurrent(c, string(mockYaml), &snap.SideInfo{Revision: snap.R("x3")}) + + // snap is not inhibited anymore + err = notInhibited(ctx) + c.Assert(err, check.IsNil) + + return flock, nil + } + }) + defer restore() + + var createCgroupCalled int + restore = snaprun.MockCreateTransientScopeForTracking(func(securityTag string, opts *cgroup.TrackingOptions) error { + createCgroupCalled++ + return nil + }) + defer restore() + + var confirmCgroupCalled int + confirmCgroup := func(securityTag string) error { + confirmCgroupCalled++ + if createCgroupCalled >= 1 || svc { + // tracking cgroup was already created + return nil + } + // no tracking cgroup exists for current process + return cgroup.ErrCannotTrackProcess + } + + if svc { + restore = snaprun.MockConfirmSystemdServiceTracking(confirmCgroup) + } else { + restore = snaprun.MockConfirmSystemdAppTracking(confirmCgroup) + } + defer restore() + + cmd := "snapname.app" + if svc { + cmd = "snapname.svc" + } + + _, err := snaprun.Parser(snaprun.Client()).ParseArgs([]string{"run", "--debug-log", "--", cmd}) + c.Assert(err, check.IsNil) + + if svc { + // no retry, sinlge call + c.Check(waitWhileInhibitedCalled, check.Equals, 1) + c.Check(confirmCgroupCalled, check.Equals, 1) + // service cgroup already created + c.Check(createCgroupCalled, check.Equals, 0) + // Check service continued with initial revision + c.Check(execEnv, testutil.Contains, "SNAP_REVISION=x2") + // notification flow is not started for services + c.Check(startCalled, check.Equals, 0) + c.Check(finishCalled, check.Equals, 0) + // check no retry logs + c.Check(strings.Contains(logbuf.String(), "retry due to possible snap 
refresh conflict detected"), check.Equals, false) + } else { + // two calls due to retry + c.Check(waitWhileInhibitedCalled, check.Equals, 2) + c.Check(confirmCgroupCalled, check.Equals, 2) + // cgroup must only be created once and reused for further retries + // to avoid leaking cgroups + c.Check(createCgroupCalled, check.Equals, 1) + // Check snap-confine points to latest revision + c.Check(execEnv, testutil.Contains, "SNAP_REVISION=x3") + // notification flow started and finished + c.Check(startCalled, check.Equals, 1) + c.Check(finishCalled, check.Equals, 1) + // check retry behavior is logged + c.Check(logbuf.String(), testutil.Contains, "retry due to possible snap refresh conflict detected") + } + + // lock should be released now + checkHintFileNotLocked(c, "snapname") +} + +func (s *RunSuite) TestSnapRunAppRetryNoInhibitHintFileThenOngoingRefresh(c *check.C) { + const svc = false + s.testSnapRunAppRetryNoInhibitHintFileThenOngoingRefresh(c, svc) +} + +func (s *RunSuite) TestSnapRunAppRetryNoInhibitHintFileThenOngoingRefreshService(c *check.C) { + const svc = true + s.testSnapRunAppRetryNoInhibitHintFileThenOngoingRefresh(c, svc) +} + +func (s *RunSuite) TestSnapRunAppRetryNoInhibitHintFileThenOngoingRefreshMissingCurrent(c *check.C) { + logbuf, restore := logger.MockLogger() + defer restore() + + defer mockSnapConfine(dirs.DistroLibExecDir)() + + var execEnv []string + restore = snaprun.MockSyscallExec(func(arg0 string, args []string, envv []string) error { + execEnv = envv + return nil + }) + defer restore() + + c.Assert(os.MkdirAll(dirs.FeaturesDir, 0755), check.IsNil) + c.Assert(os.WriteFile(features.RefreshAppAwareness.ControlFile(), []byte(nil), 0644), check.IsNil) + + var startCalled, finishCalled int + inhibitionFlow := fakeInhibitionFlow{ + start: func(ctx context.Context) error { + startCalled++ + return nil + }, + finish: func(ctx context.Context) error { + finishCalled++ + return nil + }, + } + restore = snaprun.MockInhibitionFlow(&inhibitionFlow) + defer restore() + + var waitWhileInhibitedCalled int + restore = snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { + waitWhileInhibitedCalled++ + + c.Check(snapName, check.Equals, "snapname") + if waitWhileInhibitedCalled == 1 { + err := notInhibited(ctx) + // mock edge case where we started without a hint lock file + // and we have an ongoing refresh which removed current symlink. + c.Assert(err, testutil.ErrorIs, snaprun.ErrSnapRefreshConflict) + // and created the inhibition hint lock file. 
+ c.Assert(runinhibit.LockWithHint("snapname", runinhibit.HintInhibitedForRefresh, runinhibit.InhibitInfo{Previous: snap.R("x2")}), check.IsNil) + return nil, err + } else { + var err error + + flock, err = openHintFileLock(snapName) + c.Assert(err, check.IsNil) + c.Assert(flock.ReadLock(), check.IsNil) + + // snap is inhibited + inhibitInfo := runinhibit.InhibitInfo{Previous: snap.R("x3")} + // update current snap revision + snaptest.MockSnapCurrent(c, string(mockYaml), &snap.SideInfo{Revision: snap.R("x3")}) + cont, err := inhibited(ctx, runinhibit.HintInhibitedForRefresh, &inhibitInfo) + c.Check(err, check.IsNil) + c.Check(cont, check.Equals, false) + + // snap is not inhibited anymore + err = notInhibited(ctx) + c.Assert(err, check.IsNil) + + return flock, nil + } + }) + defer restore() + + var createCgroupCalled int + restore = snaprun.MockCreateTransientScopeForTracking(func(securityTag string, opts *cgroup.TrackingOptions) error { + createCgroupCalled++ + return nil + }) + defer restore() + + var confirmCgroupCalled int + confirmCgroup := func(securityTag string) error { + confirmCgroupCalled++ + if createCgroupCalled >= 1 { + // tracking cgroup was already created + return nil + } + // no tracking cgroup exists for current process + return cgroup.ErrCannotTrackProcess + } + + restore = snaprun.MockConfirmSystemdAppTracking(confirmCgroup) + defer restore() + + _, err := snaprun.Parser(snaprun.Client()).ParseArgs([]string{"run", "--debug-log", "--", "snapname.app"}) + c.Assert(err, check.IsNil) + + // two calls due to retry + c.Check(waitWhileInhibitedCalled, check.Equals, 2) + // We entered snap-confine only once + c.Check(confirmCgroupCalled, check.Equals, 1) + c.Check(createCgroupCalled, check.Equals, 1) + // Check snap-confine points to latest revision + c.Check(execEnv, testutil.Contains, "SNAP_REVISION=x3") + // notification flow started and finished + c.Check(startCalled, check.Equals, 1) + c.Check(finishCalled, check.Equals, 1) + // check retry behavior is logged + c.Check(logbuf.String(), testutil.Contains, "cannot find current revision for snap snapname") + c.Check(logbuf.String(), testutil.Contains, "retry due to possible snap refresh conflict detected") + + // lock should be released now + checkHintFileNotLocked(c, "snapname") +} + +func (s *RunSuite) TestSnapRunAppMaxRetry(c *check.C) { + defer mockSnapConfine(dirs.DistroLibExecDir)() + + // mock installed snap + snaptest.MockSnapCurrent(c, string(mockYaml), &snap.SideInfo{Revision: snap.R("x2")}) + + c.Assert(os.MkdirAll(dirs.FeaturesDir, 0755), check.IsNil) + c.Assert(os.WriteFile(features.RefreshAppAwareness.ControlFile(), []byte(nil), 0644), check.IsNil) + + inhibitionFlow := fakeInhibitionFlow{ + start: func(ctx context.Context) error { + return fmt.Errorf("this should never be reached") + }, + finish: func(ctx context.Context) error { + return fmt.Errorf("this should never be reached") + }, + } + restore := snaprun.MockInhibitionFlow(&inhibitionFlow) + defer restore() + + var called int + restore = snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { + called++ + c.Check(snapName, check.Equals, "snapname") + + err := notInhibited(ctx) + c.Assert(err, check.IsNil) + + // mock snap inhibited to trigger race condition detection + // i.e. 
we started without a hint lock file (snap on first install) + // then a refresh started which created the hint lock file. + c.Assert(runinhibit.LockWithHint("snapname", runinhibit.HintInhibitedForRefresh, runinhibit.InhibitInfo{Previous: snap.R("x2")}), check.IsNil) + + // nil FileLock means no inhibit file exists + return nil, nil + }) + defer restore() + + _, err := snaprun.Parser(snaprun.Client()).ParseArgs([]string{"run", "--", "snapname.app", "--arg1"}) + c.Assert(err, check.ErrorMatches, "race condition detected, snap-run can only retry once") + // check we only retried once + c.Check(called, check.Equals, 2) } func (s *RunSuite) TestSnapRunClassicAppIntegration(c *check.C) { @@ -1867,378 +2450,6 @@ return osutil.NewFileLockWithMode(runinhibit.HintFile(snapName), 0644) } -func checkHintFileNotLocked(c *check.C, snapName string) { - flock, err := openHintFileLock(snapName) - c.Assert(err, check.IsNil) - c.Check(flock.TryLock(), check.IsNil) - flock.Close() -} - -func (s *RunSuite) TestWaitWhileInhibitedNoop(c *check.C) { - inhibitInfo := runinhibit.InhibitInfo{Previous: snap.R(11)} - c.Assert(runinhibit.LockWithHint("some-snap", runinhibit.HintInhibitedGateRefresh, inhibitInfo), check.IsNil) - - var called int - restore := snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { - called++ - - c.Check(snapName, check.Equals, "some-snap") - c.Check(ctx, check.NotNil) - for i := 0; i < 3; i++ { - cont, err := inhibited(ctx, runinhibit.HintInhibitedGateRefresh, nil) - c.Assert(err, check.IsNil) - // non-service apps should keep waiting - c.Check(cont, check.Equals, false) - } - if notInhibited != nil { - c.Errorf("this should never be reached") - } - - flock, err := openHintFileLock(snapName) - c.Assert(err, check.IsNil) - return flock, nil - }) - defer restore() - - meter := &progresstest.Meter{} - defer progress.MockMeter(meter)() - - c.Assert(snaprun.WaitWhileInhibited(context.TODO(), "some-snap"), check.IsNil) - c.Check(called, check.Equals, 1) - - c.Check(meter.Values, check.HasLen, 0) - c.Check(meter.Written, check.HasLen, 0) - c.Check(meter.Finishes, check.Equals, 0) - c.Check(meter.Labels, check.HasLen, 0) - c.Check(meter.Labels, check.HasLen, 0) - - // lock must be released - checkHintFileNotLocked(c, "some-snap") -} - -func (s *RunSuite) TestWaitWhileInhibitedTextFlow(c *check.C) { - inhibitInfo := runinhibit.InhibitInfo{Previous: snap.R(11)} - c.Assert(runinhibit.LockWithHint("some-snap", runinhibit.HintInhibitedGateRefresh, inhibitInfo), check.IsNil) - - var called int - restore := snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { - called++ - - c.Check(snapName, check.Equals, "some-snap") - cont, err := inhibited(ctx, runinhibit.HintInhibitedGateRefresh, nil) - c.Assert(err, check.IsNil) - // non-service apps should keep waiting - c.Check(cont, check.Equals, false) - cont, err = inhibited(ctx, runinhibit.HintInhibitedForRefresh, nil) - c.Assert(err, check.IsNil) - // non-service apps should keep waiting - c.Check(cont, check.Equals, false) - if notInhibited != nil { - c.Errorf("this 
should never be reached") - } - - flock, err = openHintFileLock(snapName) - c.Assert(err, check.IsNil) - return flock, nil - }) - defer restore() - - c.Assert(snaprun.WaitWhileInhibited(context.TODO(), "some-snap"), check.IsNil) - c.Check(called, check.Equals, 1) - - c.Check(s.Stdout(), check.Equals, "snap package \"some-snap\" is being refreshed, please wait\n") - - // lock must be released - checkHintFileNotLocked(c, "some-snap") -} - -func (s *RunSuite) TestWaitWhileInhibitedDesktopIntegrationFlow(c *check.C) { - _, r := logger.MockLogger() - defer r() - - var dbusCalled int - conn, _, err := dbustest.InjectableConnection(func(msg *dbus.Message, n int) ([]*dbus.Message, error) { - dbusCalled++ - return []*dbus.Message{makeDBusMethodAvailableMessage(c, msg)}, nil - }) - c.Assert(err, check.IsNil) - - restore := dbusutil.MockOnlySessionBusAvailable(conn) - defer restore() - - restoreIsGraphicalSession := snaprun.MockIsGraphicalSession(true) - defer restoreIsGraphicalSession() - - var pendingRefreshNotificationCalled int - restorePendingRefreshNotification := snaprun.MockPendingRefreshNotification(func(ctx context.Context, refreshInfo *usersessionclient.PendingSnapRefreshInfo) error { - pendingRefreshNotificationCalled++ - c.Error("this should never be reached") - return nil - }) - defer restorePendingRefreshNotification() - - var finishRefreshNotificationCalled int - restoreFinishRefreshNotification := snaprun.MockFinishRefreshNotification(func(ctx context.Context, refreshInfo *usersessionclient.FinishedSnapRefreshInfo) error { - finishRefreshNotificationCalled++ - c.Error("this should never be reached") - return nil - }) - defer restoreFinishRefreshNotification() - - inhibitInfo := runinhibit.InhibitInfo{Previous: snap.R(11)} - c.Assert(runinhibit.LockWithHint("some-snap", runinhibit.HintInhibitedForRefresh, inhibitInfo), check.IsNil) - - var called int - restore = snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { - called++ - - c.Check(snapName, check.Equals, "some-snap") - for i := 0; i < 3; i++ { - cont, err := inhibited(ctx, runinhibit.HintInhibitedForRefresh, nil) - c.Assert(err, check.IsNil) - // non-service apps should keep waiting - c.Check(cont, check.Equals, false) - } - if notInhibited != nil { - c.Errorf("this should never be reached") - } - - flock, err := openHintFileLock(snapName) - c.Assert(err, check.IsNil) - return flock, nil - }) - defer restore() - - c.Assert(snaprun.WaitWhileInhibited(context.TODO(), "some-snap"), check.IsNil) - c.Check(called, check.Equals, 1) - c.Check(s.Stdout(), check.Equals, "") - - // snapd-desktop-integration snap monitors inhibit file - // flow.Finish is a no-op, so it's only called once - c.Check(dbusCalled, check.Equals, 1) - // session flow was not called - c.Check(pendingRefreshNotificationCalled, check.Equals, 0) - c.Check(finishRefreshNotificationCalled, check.Equals, 0) - - // lock must be released - checkHintFileNotLocked(c, "some-snap") -} - -func (s *RunSuite) TestWaitWhileInhibitedGraphicalSessionFlow(c *check.C) { - _, r := logger.MockLogger() - defer r() - - originalCtx := context.Background() - - restoreIsGraphicalSession := snaprun.MockIsGraphicalSession(true) - defer restoreIsGraphicalSession() - - restoreTryNotifyRefresh := 
snaprun.MockTryNotifyRefreshViaSnapDesktopIntegrationFlow(func(ctx context.Context, snapName string) bool { - c.Check(snapName, check.Equals, "some-snap") - // check context is propagated properly - c.Assert(ctx, check.Equals, originalCtx) - c.Check(ctx.Err(), check.IsNil) - return false - }) - defer restoreTryNotifyRefresh() - - var pendingRefreshNotificationCalled int - restorePendingRefreshNotification := snaprun.MockPendingRefreshNotification(func(ctx context.Context, refreshInfo *usersessionclient.PendingSnapRefreshInfo) error { - pendingRefreshNotificationCalled++ - // check context is propagated properly - c.Assert(ctx, check.Equals, originalCtx) - c.Check(ctx.Err(), check.IsNil) - c.Check(refreshInfo, check.DeepEquals, &usersessionclient.PendingSnapRefreshInfo{ - InstanceName: "some-snap", - TimeRemaining: 0, - }) - return nil - }) - defer restorePendingRefreshNotification() - - var finishRefreshNotificationCalled int - restoreFinishRefreshNotification := snaprun.MockFinishRefreshNotification(func(ctx context.Context, refreshInfo *usersessionclient.FinishedSnapRefreshInfo) error { - finishRefreshNotificationCalled++ - // check context is propagated properly - c.Assert(ctx, check.Equals, originalCtx) - c.Check(ctx.Err(), check.IsNil) - c.Check(refreshInfo, check.DeepEquals, &usersessionclient.FinishedSnapRefreshInfo{ - InstanceName: "some-snap", - }) - return nil - }) - defer restoreFinishRefreshNotification() - - inhibitInfo := runinhibit.InhibitInfo{Previous: snap.R(11)} - c.Assert(runinhibit.LockWithHint("some-snap", runinhibit.HintInhibitedForRefresh, inhibitInfo), check.IsNil) - - var called int - restore := snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { - called++ - - c.Check(snapName, check.Equals, "some-snap") - for i := 0; i < 3; i++ { - cont, err := inhibited(ctx, runinhibit.HintInhibitedForRefresh, nil) - c.Assert(err, check.IsNil) - // non-service apps should keep waiting - c.Check(cont, check.Equals, false) - } - if notInhibited != nil { - c.Errorf("this should never be reached") - } - - flock, err := openHintFileLock(snapName) - c.Assert(err, check.IsNil) - return flock, nil - }) - defer restore() - - c.Assert(snaprun.WaitWhileInhibited(originalCtx, "some-snap"), check.IsNil) - c.Check(called, check.Equals, 1) - c.Check(s.Stdout(), check.Equals, "") - - c.Check(pendingRefreshNotificationCalled, check.Equals, 1) - c.Check(finishRefreshNotificationCalled, check.Equals, 1) - - // lock must be released - checkHintFileNotLocked(c, "some-snap") -} - -func (s *RunSuite) TestWaitWhileInhibitedGraphicalSessionFlowError(c *check.C) { - _, r := logger.MockLogger() - defer r() - - restoreIsGraphicalSession := snaprun.MockIsGraphicalSession(true) - defer restoreIsGraphicalSession() - - restoreTryNotifyRefresh := snaprun.MockTryNotifyRefreshViaSnapDesktopIntegrationFlow(func(ctx context.Context, snapName string) bool { - c.Check(snapName, check.Equals, "some-snap") - return false - }) - defer restoreTryNotifyRefresh() - - var pendingRefreshNotificationCalled int - restorePendingRefreshNotification := snaprun.MockPendingRefreshNotification(func(ctx context.Context, refreshInfo *usersessionclient.PendingSnapRefreshInfo) error { - pendingRefreshNotificationCalled++ - c.Check(refreshInfo, check.DeepEquals, 
&usersessionclient.PendingSnapRefreshInfo{ - InstanceName: "some-snap", - TimeRemaining: 0, - }) - return fmt.Errorf("boom") - }) - defer restorePendingRefreshNotification() - - restoreFinishRefreshNotification := snaprun.MockFinishRefreshNotification(func(ctx context.Context, refreshInfo *usersessionclient.FinishedSnapRefreshInfo) error { - c.Errorf("this should never be reached") - return nil - }) - defer restoreFinishRefreshNotification() - - inhibitInfo := runinhibit.InhibitInfo{Previous: snap.R(11)} - c.Assert(runinhibit.LockWithHint("some-snap", runinhibit.HintInhibitedForRefresh, inhibitInfo), check.IsNil) - - restore := snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { - c.Check(snapName, check.Equals, "some-snap") - - _, err := inhibited(ctx, runinhibit.HintInhibitedForRefresh, nil) - c.Assert(err, check.ErrorMatches, "boom") - return nil, err - }) - defer restore() - - c.Assert(snaprun.WaitWhileInhibited(context.TODO(), "some-snap"), check.ErrorMatches, "boom") - - c.Check(pendingRefreshNotificationCalled, check.Equals, 1) -} - -func (s *RunSuite) TestWaitWhileInhibitedGraphicalSessionFlowErrorOnFinish(c *check.C) { - _, r := logger.MockLogger() - defer r() - - restoreIsGraphicalSession := snaprun.MockIsGraphicalSession(true) - defer restoreIsGraphicalSession() - - restoreTryNotifyRefresh := snaprun.MockTryNotifyRefreshViaSnapDesktopIntegrationFlow(func(ctx context.Context, snapName string) bool { - c.Check(snapName, check.Equals, "some-snap") - return false - }) - defer restoreTryNotifyRefresh() - - var pendingRefreshNotificationCalled int - restorePendingRefreshNotification := snaprun.MockPendingRefreshNotification(func(ctx context.Context, refreshInfo *usersessionclient.PendingSnapRefreshInfo) error { - pendingRefreshNotificationCalled++ - c.Check(refreshInfo, check.DeepEquals, &usersessionclient.PendingSnapRefreshInfo{ - InstanceName: "some-snap", - TimeRemaining: 0, - }) - return nil - }) - defer restorePendingRefreshNotification() - - var finishRefreshNotificationCalled int - restoreFinishRefreshNotification := snaprun.MockFinishRefreshNotification(func(ctx context.Context, refreshInfo *usersessionclient.FinishedSnapRefreshInfo) error { - finishRefreshNotificationCalled++ - c.Check(refreshInfo, check.DeepEquals, &usersessionclient.FinishedSnapRefreshInfo{ - InstanceName: "some-snap", - }) - return fmt.Errorf("boom") - }) - defer restoreFinishRefreshNotification() - - inhibitInfo := runinhibit.InhibitInfo{Previous: snap.R(11)} - c.Assert(runinhibit.LockWithHint("some-snap", runinhibit.HintInhibitedForRefresh, inhibitInfo), check.IsNil) - - restore := snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { - c.Check(snapName, check.Equals, "some-snap") - - cont, err := inhibited(ctx, runinhibit.HintInhibitedForRefresh, nil) - c.Assert(err, check.IsNil) - // non-service apps should keep waiting - c.Check(cont, check.Equals, false) - if notInhibited != nil { - c.Errorf("this should never be reached") - } - - flock, err = openHintFileLock(snapName) - c.Assert(err, check.IsNil) - return 
flock, nil - }) - defer restore() - - c.Assert(snaprun.WaitWhileInhibited(context.TODO(), "some-snap"), check.ErrorMatches, "boom") - - c.Check(pendingRefreshNotificationCalled, check.Equals, 1) - c.Check(finishRefreshNotificationCalled, check.Equals, 1) - - // lock must be released - checkHintFileNotLocked(c, "some-snap") -} - -func (s *RunSuite) TestWaitWhileInhibitedContextCancellationOnError(c *check.C) { - inhibitInfo := runinhibit.InhibitInfo{Previous: snap.R(11)} - c.Assert(runinhibit.LockWithHint("some-snap", runinhibit.HintInhibitedForRefresh, inhibitInfo), check.IsNil) - - restoreIsGraphicalSession := snaprun.MockIsGraphicalSession(true) - defer restoreIsGraphicalSession() - - originalCtx, cancel := context.WithCancel(context.Background()) - restoreTryNotifyRefresh := snaprun.MockTryNotifyRefreshViaSnapDesktopIntegrationFlow(func(ctx context.Context, snapName string) bool { - c.Check(snapName, check.Equals, "some-snap") - // check context is propagated properly - c.Assert(ctx, check.Equals, originalCtx) - c.Check(ctx.Err(), check.IsNil) - // cancel context to trigger cancellation error - cancel() - return true - }) - defer restoreTryNotifyRefresh() - - err := snaprun.WaitWhileInhibited(originalCtx, "some-snap") - c.Assert(err, check.ErrorMatches, "context canceled") - c.Assert(errors.Is(err, context.Canceled), check.Equals, true) - c.Assert(errors.Is(originalCtx.Err(), context.Canceled), check.Equals, true) -} - func (s *RunSuite) TestCreateSnapDirPermissions(c *check.C) { usr, err := user.Current() c.Assert(err, check.IsNil) @@ -2326,82 +2537,3 @@ // and we've let the user know that logging was enabled c.Check(logBuf.String(), testutil.Contains, "DEBUG: enabled debug logging of early snap startup") } - -func (s *RunSuite) TestDesktopIntegrationNoDBus(c *check.C) { - _, r := logger.MockLogger() - defer r() - - noDBus := func() (*dbus.Conn, error) { return nil, fmt.Errorf("dbus not available") } - restore := dbusutil.MockConnections(noDBus, noDBus) - defer restore() - - sent := snaprun.TryNotifyRefreshViaSnapDesktopIntegrationFlow(context.TODO(), "Test") - c.Assert(sent, check.Equals, false) -} - -func makeDBusMethodNotAvailableMessage(c *check.C, msg *dbus.Message) *dbus.Message { - return &dbus.Message{ - Type: dbus.TypeError, - Headers: map[dbus.HeaderField]dbus.Variant{ - dbus.FieldReplySerial: dbus.MakeVariant(msg.Serial()), - dbus.FieldSender: dbus.MakeVariant(":1"), // This does not matter. - // dbus.FieldDestination is provided automatically by DBus test helper. 
- dbus.FieldErrorName: dbus.MakeVariant("org.freedesktop.DBus.Error.UnknownMethod"), - }, - } -} - -func (s *RunSuite) TestDesktopIntegrationDBusAvailableNoMethod(c *check.C) { - _, r := logger.MockLogger() - defer r() - - conn, _, err := dbustest.InjectableConnection(func(msg *dbus.Message, n int) ([]*dbus.Message, error) { - return []*dbus.Message{makeDBusMethodNotAvailableMessage(c, msg)}, nil - }) - c.Assert(err, check.IsNil) - - restore := dbusutil.MockOnlySessionBusAvailable(conn) - defer restore() - - sent := snaprun.TryNotifyRefreshViaSnapDesktopIntegrationFlow(context.TODO(), "some-snap") - c.Assert(sent, check.Equals, false) -} - -func makeDBusMethodAvailableMessage(c *check.C, msg *dbus.Message) *dbus.Message { - c.Assert(msg.Type, check.Equals, dbus.TypeMethodCall) - c.Check(msg.Flags, check.Equals, dbus.Flags(0)) - - c.Check(msg.Headers, check.DeepEquals, map[dbus.HeaderField]dbus.Variant{ - dbus.FieldDestination: dbus.MakeVariant("io.snapcraft.SnapDesktopIntegration"), - dbus.FieldPath: dbus.MakeVariant(dbus.ObjectPath("/io/snapcraft/SnapDesktopIntegration")), - dbus.FieldInterface: dbus.MakeVariant("io.snapcraft.SnapDesktopIntegration"), - dbus.FieldMember: dbus.MakeVariant("ApplicationIsBeingRefreshed"), - dbus.FieldSignature: dbus.MakeVariant(dbus.SignatureOf("", "", make(map[string]dbus.Variant))), - }) - c.Check(msg.Body[0], check.Equals, "some-snap") - param2 := fmt.Sprintf("%s", msg.Body[1]) - c.Check(strings.HasSuffix(param2, "/var/lib/snapd/inhibit/some-snap.lock"), check.Equals, true) - return &dbus.Message{ - Type: dbus.TypeMethodReply, - Headers: map[dbus.HeaderField]dbus.Variant{ - dbus.FieldReplySerial: dbus.MakeVariant(msg.Serial()), - dbus.FieldSender: dbus.MakeVariant(":1"), // This does not matter. - }, - } -} - -func (s *RunSuite) TestDesktopIntegrationDBusAvailableMethodWorks(c *check.C) { - _, r := logger.MockLogger() - defer r() - - conn, _, err := dbustest.InjectableConnection(func(msg *dbus.Message, n int) ([]*dbus.Message, error) { - return []*dbus.Message{makeDBusMethodAvailableMessage(c, msg)}, nil - }) - c.Assert(err, check.IsNil) - - restore := dbusutil.MockOnlySessionBusAvailable(conn) - defer restore() - - sent := snaprun.TryNotifyRefreshViaSnapDesktopIntegrationFlow(context.TODO(), "some-snap") - c.Assert(sent, check.Equals, true) -} diff -Nru snapd-2.62+23.10/cmd/snap/cmd_services.go snapd-2.63+23.10/cmd/snap/cmd_services.go --- snapd-2.62+23.10/cmd/snap/cmd_services.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_services.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,6 +21,7 @@ import ( "fmt" + "os/user" "strconv" "github.com/jessevdk/go-flags" @@ -28,7 +29,6 @@ "github.com/snapcore/snapd/client" "github.com/snapcore/snapd/client/clientutil" "github.com/snapcore/snapd/i18n" - "github.com/snapcore/snapd/snap" ) type svcStatus struct { @@ -36,6 +36,8 @@ Positional struct { ServiceNames []serviceName } `positional-args:"yes"` + Global bool `long:"global" short:"g"` + User bool `long:"user" short:"u"` } type svcLogs struct { @@ -53,6 +55,14 @@ longServicesHelp = i18n.G(` The services command lists information about the services specified, or about the services in all currently installed snaps. + +If executed as root user, the 'Startup' column of any user service will be whether +it's globally enabled (i.e systemctl is-enabled). To view the actual 'Startup'|'Current' +status of the user services for the root user itself, --user can be provided. 
+ +If executed as a non-root user, the 'Startup'|'Current' status of user services +will be the current status for the invoking user. To view the global enablement +status of user services, --global can be provided. `) shortLogsHelp = i18n.G("Retrieve logs for services") longLogsHelp = i18n.G(` @@ -83,7 +93,12 @@ // TRANSLATORS: This should not start with a lowercase letter. desc: i18n.G("A service specification, which can be just a snap name (for all services in the snap), or . for a single service."), }} - addCommand("services", shortServicesHelp, longServicesHelp, func() flags.Commander { return &svcStatus{} }, nil, argdescs) + addCommand("services", shortServicesHelp, longServicesHelp, func() flags.Commander { return &svcStatus{} }, map[string]string{ + // TRANSLATORS: This should not start with a lowercase letter. + "global": i18n.G("Show the global enable status for user services instead of the status for the current user."), + // TRANSLATORS: This should not start with a lowercase letter. + "user": i18n.G("Show the current status of the user services instead of the global enable status."), + }, argdescs) addCommand("logs", shortLogsHelp, longLogsHelp, func() flags.Commander { return &svcLogs{} }, timeDescs.also(map[string]string{ // TRANSLATORS: This should not start with a lowercase letter. @@ -117,12 +132,42 @@ return svcNames } +func (s *svcStatus) showGlobalEnablement(u *user.User) bool { + if u.Uid == "0" && !s.User { + return true + } else if u.Uid != "0" && s.Global { + return true + } + return false +} + +func (s *svcStatus) validateArguments() error { + // can't use --global and --user together + if s.Global && s.User { + return fmt.Errorf(i18n.G("cannot combine --global and --user switches.")) + } + return nil +} + func (s *svcStatus) Execute(args []string) error { if len(args) > 0 { return ErrExtraArgs } - services, err := s.client.Apps(svcNames(s.Positional.ServiceNames), client.AppOptions{Service: true}) + if err := s.validateArguments(); err != nil { + return err + } + + u, err := userCurrent() + if err != nil { + return fmt.Errorf(i18n.G("cannot get the current user: %s."), err) + } + + isGlobal := s.showGlobalEnablement(u) + services, err := s.client.Apps(svcNames(s.Positional.ServiceNames), client.AppOptions{ + Service: true, + Global: isGlobal, + }) if err != nil { return err } @@ -136,21 +181,9 @@ defer w.Flush() fmt.Fprintln(w, i18n.G("Service\tStartup\tCurrent\tNotes")) - for _, svc := range services { - startup := i18n.G("disabled") - if svc.Enabled { - startup = i18n.G("enabled") - } - current := i18n.G("inactive") - if svc.DaemonScope == snap.UserDaemon { - current = "-" - } else if svc.Active { - current = i18n.G("active") - } - fmt.Fprintf(w, "%s.%s\t%s\t%s\t%s\n", svc.Snap, svc.Name, startup, current, clientutil.ClientAppInfoNotes(svc)) + fmt.Fprintln(w, clientutil.FmtServiceStatus(svc, isGlobal)) } - return nil } diff -Nru snapd-2.62+23.10/cmd/snap/cmd_services_test.go snapd-2.63+23.10/cmd/snap/cmd_services_test.go --- snapd-2.62+23.10/cmd/snap/cmd_services_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_services_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,6 +23,7 @@ "encoding/json" "fmt" "net/http" + "os/user" "sort" "strings" "time" @@ -31,6 +32,7 @@ "github.com/snapcore/snapd/client" snap "github.com/snapcore/snapd/cmd/snap" + "github.com/snapcore/snapd/strutil" ) type appOpSuite struct { @@ -263,11 +265,115 @@ func (s *appOpSuite) TestAppStatus(c *check.C) { n := 0 + var hasGlobal bool + 
s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { + switch n { + case 0, 1, 2, 3: + c.Check(r.URL.Path, check.Equals, "/v2/apps") + if hasGlobal { + c.Check(r.URL.Query(), check.HasLen, 2) + } else { + c.Check(r.URL.Query(), check.HasLen, 1) + } + c.Check(r.URL.Query().Get("select"), check.Equals, "service") + c.Check(r.Method, check.Equals, "GET") + w.WriteHeader(200) + enc := json.NewEncoder(w) + enc.Encode(map[string]interface{}{ + "type": "sync", + "result": []map[string]interface{}{ + { + "snap": "foo", + "name": "bar", + "daemon": "oneshot", + "daemon-scope": "system", + "active": false, + "enabled": true, + "activators": []map[string]interface{}{ + {"name": "bar", "type": "timer", "active": true, "enabled": true}, + }, + }, { + "snap": "foo", + "name": "baz", + "daemon": "oneshot", + "daemon-scope": "system", + "active": false, + "enabled": true, + "activators": []map[string]interface{}{ + {"name": "baz-sock1", "type": "socket", "active": true, "enabled": true}, + {"name": "baz-sock2", "type": "socket", "active": false, "enabled": true}, + }, + }, { + "snap": "foo", + "name": "qux", + "daemon": "simple", + "daemon-scope": "user", + "active": false, + "enabled": true, + }, { + "snap": "foo", + "name": "zed", + "active": true, + "enabled": true, + }, + }, + "status": "OK", + "status-code": 200, + }) + default: + c.Fatalf("expected to get 1 requests, now on %d", n+1) + } + + n++ + }) + + tests := []struct { + uid string + arguments []string + userServiceLine string + }{ + {"0", []string{"services"}, "foo.qux enabled - user"}, + {"0", []string{"services", "--user"}, "foo.qux enabled inactive user"}, + {"1337", []string{"services"}, "foo.qux enabled inactive user"}, + {"1337", []string{"services", "--global"}, "foo.qux enabled - user"}, + } + + var testsRun int + for _, t := range tests { + testsRun++ + s.stdout.Reset() + hasGlobal = (t.uid == "0" && !strutil.ListContains(t.arguments, "--user")) || strutil.ListContains(t.arguments, "--global") + r := snap.MockUserCurrent(func() (*user.User, error) { + return &user.User{ + Uid: t.uid, + }, nil + }) + + rest, err := snap.Parser(snap.Client()).ParseArgs(t.arguments) + c.Check(err, check.IsNil) + c.Check(rest, check.HasLen, 0) + c.Check(s.Stderr(), check.Equals, "") + c.Check(s.Stdout(), check.Equals, fmt.Sprintf(`Service Startup Current Notes +foo.bar enabled inactive timer-activated +foo.baz enabled inactive socket-activated +%s +foo.zed enabled active - +`, t.userServiceLine)) + // ensure that the fake server api was actually hit + c.Check(n, check.Equals, testsRun) + + // restore the user mock + r() + } +} + +func (s *appOpSuite) TestAppStatusGlobal(c *check.C) { + n := 0 s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { switch n { case 0: c.Check(r.URL.Path, check.Equals, "/v2/apps") - c.Check(r.URL.Query(), check.HasLen, 1) + c.Check(r.URL.Query(), check.HasLen, 2) c.Check(r.URL.Query().Get("select"), check.Equals, "service") c.Check(r.Method, check.Equals, "GET") w.WriteHeader(200) @@ -319,7 +425,7 @@ n++ }) - rest, err := snap.Parser(snap.Client()).ParseArgs([]string{"services"}) + rest, err := snap.Parser(snap.Client()).ParseArgs([]string{"services", "--global"}) c.Assert(err, check.IsNil) c.Assert(rest, check.HasLen, 0) c.Check(s.Stderr(), check.Equals, "") @@ -378,6 +484,26 @@ c.Check(n, check.Equals, 7) } +func (s *appOpSuite) TestAppStatusUserFailed(c *check.C) { + r := snap.MockUserCurrent(func() (*user.User, error) { + return nil, fmt.Errorf("oh-no") + }) + defer r() + _, err 
:= snap.Parser(snap.Client()).ParseArgs([]string{"services"}) + c.Check(err, check.ErrorMatches, `cannot get the current user: oh-no.`) +} + +func (s *appOpSuite) TestAppStatusInvalidUserGlobalSwitches(c *check.C) { + r := snap.MockUserCurrent(func() (*user.User, error) { + return &user.User{ + Uid: "0", + }, nil + }) + defer r() + _, err := snap.Parser(snap.Client()).ParseArgs([]string{"services", "--global", "--user"}) + c.Check(err, check.ErrorMatches, `cannot combine --global and --user switches.`) +} + func (s *appOpSuite) TestAppStatusNoServices(c *check.C) { n := 0 s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { diff -Nru snapd-2.62+23.10/cmd/snap/cmd_sign.go snapd-2.63+23.10/cmd/snap/cmd_sign.go --- snapd-2.62+23.10/cmd/snap/cmd_sign.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_sign.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,8 @@ import ( "bytes" "fmt" - "io/ioutil" + "io" + "os" "github.com/jessevdk/go-flags" @@ -77,9 +78,9 @@ err error ) if !useStdin { - statement, err = ioutil.ReadFile(string(x.Positional.Filename)) + statement, err = os.ReadFile(string(x.Positional.Filename)) } else { - statement, err = ioutil.ReadAll(Stdin) + statement, err = io.ReadAll(Stdin) } if err != nil { return fmt.Errorf(i18n.G("cannot read assertion input: %v"), err) diff -Nru snapd-2.62+23.10/cmd/snap/cmd_sign_build_test.go snapd-2.63+23.10/cmd/snap/cmd_sign_build_test.go --- snapd-2.62+23.10/cmd/snap/cmd_sign_build_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_sign_build_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "fmt" - "io/ioutil" "os" "path/filepath" @@ -79,7 +78,7 @@ tempdir := c.MkDir() for _, fileName := range []string{"pubring.gpg", "secring.gpg", "trustdb.gpg"} { - data, err := ioutil.ReadFile(filepath.Join("test-data", fileName)) + data, err := os.ReadFile(filepath.Join("test-data", fileName)) c.Assert(err, IsNil) err = os.WriteFile(filepath.Join(tempdir, fileName), data, 0644) c.Assert(err, IsNil) @@ -114,7 +113,7 @@ tempdir := c.MkDir() for _, fileName := range []string{"pubring.gpg", "secring.gpg", "trustdb.gpg"} { - data, err := ioutil.ReadFile(filepath.Join("test-data", fileName)) + data, err := os.ReadFile(filepath.Join("test-data", fileName)) c.Assert(err, IsNil) err = os.WriteFile(filepath.Join(tempdir, fileName), data, 0644) c.Assert(err, IsNil) diff -Nru snapd-2.62+23.10/cmd/snap/cmd_snap_op_test.go snapd-2.63+23.10/cmd/snap/cmd_snap_op_test.go --- snapd-2.62+23.10/cmd/snap/cmd_snap_op_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_snap_op_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "encoding/json" "fmt" - "io/ioutil" + "io" "mime" "mime/multipart" "net/http" @@ -132,10 +132,9 @@ restore := snap.MockMaxGoneTime(time.Millisecond) defer restore() - // lazy way of getting a URL that won't work nor break stuff - server := httptest.NewServer(nil) - snap.ClientConfig.BaseURL = server.URL - server.Close() + // should always result in a connection refused error, since port zero isn't + // valid + snap.ClientConfig.BaseURL = "http://localhost:0" cli := snap.Client() chg, err := snap.Wait(cli, "x") @@ -948,7 +947,7 @@ c.Assert(err, check.IsNil) defer body.Close() filename = fheaders[0].Filename - content, err = ioutil.ReadAll(body) + content, err = io.ReadAll(body) c.Assert(err, check.IsNil) return name, filename, content @@ -1287,7 +1286,7 @@ c.Assert(err, check.IsNil) defer body.Close() - content, err := 
ioutil.ReadAll(body) + content, err := io.ReadAll(body) c.Assert(err, check.IsNil) contents = append(contents, content) filenames = append(filenames, h.Filename) diff -Nru snapd-2.62+23.10/cmd/snap/cmd_validate_test.go snapd-2.63+23.10/cmd/snap/cmd_validate_test.go --- snapd-2.62+23.10/cmd/snap/cmd_validate_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_validate_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,12 +21,12 @@ import ( "fmt" - "io/ioutil" + "io" "net/http" "gopkg.in/check.v1" - "github.com/snapcore/snapd/cmd/snap" + main "github.com/snapcore/snapd/cmd/snap" ) type validateSuite struct { @@ -45,7 +45,7 @@ c.Check(r.URL.Path, check.Equals, "/v2/validation-sets/foo/bar") c.Check(r.Method, check.Equals, "POST") - buf, err := ioutil.ReadAll(r.Body) + buf, err := io.ReadAll(r.Body) c.Assert(err, check.IsNil) switch { case sequence != 0 && action != "forget": diff -Nru snapd-2.62+23.10/cmd/snap/cmd_warnings_test.go snapd-2.63+23.10/cmd/snap/cmd_warnings_test.go --- snapd-2.62+23.10/cmd/snap/cmd_warnings_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/cmd_warnings_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "io" "net/http" "time" @@ -68,7 +68,7 @@ c.Check(r.URL.Path, check.Equals, "/v2/warnings") c.Check(r.URL.Query(), check.HasLen, 0) - buf, err := ioutil.ReadAll(r.Body) + buf, err := io.ReadAll(r.Body) c.Assert(err, check.IsNil) c.Check(string(buf), check.Equals, "") c.Check(r.Method, check.Equals, "GET") @@ -189,7 +189,7 @@ c.Check(r.URL.Path, check.Equals, "/v2/snaps") c.Check(r.URL.Query(), check.HasLen, 0) - buf, err := ioutil.ReadAll(r.Body) + buf, err := io.ReadAll(r.Body) c.Assert(err, check.IsNil) c.Check(string(buf), check.Equals, "") c.Check(r.Method, check.Equals, "GET") diff -Nru snapd-2.62+23.10/cmd/snap/export_test.go snapd-2.63+23.10/cmd/snap/export_test.go --- snapd-2.62+23.10/cmd/snap/export_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/export_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -38,7 +38,6 @@ "github.com/snapcore/snapd/store" "github.com/snapcore/snapd/store/tooling" "github.com/snapcore/snapd/testutil" - usersessionclient "github.com/snapcore/snapd/usersession/client" ) var RunMain = run @@ -133,27 +132,27 @@ } var ( - ClientSnapFromPath = clientSnapFromPath - SetupDiskSnap = (*infoWriter).setupDiskSnap - SetupSnap = (*infoWriter).setupSnap - MaybePrintServices = (*infoWriter).maybePrintServices - MaybePrintCommands = (*infoWriter).maybePrintCommands - MaybePrintType = (*infoWriter).maybePrintType - PrintSummary = (*infoWriter).printSummary - MaybePrintPublisher = (*infoWriter).maybePrintPublisher - MaybePrintNotes = (*infoWriter).maybePrintNotes - MaybePrintStandaloneVersion = (*infoWriter).maybePrintStandaloneVersion - MaybePrintBuildDate = (*infoWriter).maybePrintBuildDate - MaybePrintLinks = (*infoWriter).maybePrintLinks - MaybePrintBase = (*infoWriter).maybePrintBase - MaybePrintPath = (*infoWriter).maybePrintPath - MaybePrintSum = (*infoWriter).maybePrintSum - MaybePrintCohortKey = (*infoWriter).maybePrintCohortKey - MaybePrintHealth = (*infoWriter).maybePrintHealth - MaybePrintRefreshInfo = (*infoWriter).maybePrintRefreshInfo - WaitWhileInhibited = waitWhileInhibited - TryNotifyRefreshViaSnapDesktopIntegrationFlow = tryNotifyRefreshViaSnapDesktopIntegrationFlow - NewInhibitionFlow = newInhibitionFlow + ClientSnapFromPath = clientSnapFromPath + SetupDiskSnap = (*infoWriter).setupDiskSnap + SetupSnap = 
(*infoWriter).setupSnap + MaybePrintServices = (*infoWriter).maybePrintServices + MaybePrintCommands = (*infoWriter).maybePrintCommands + MaybePrintType = (*infoWriter).maybePrintType + PrintSummary = (*infoWriter).printSummary + MaybePrintPublisher = (*infoWriter).maybePrintPublisher + MaybePrintNotes = (*infoWriter).maybePrintNotes + MaybePrintStandaloneVersion = (*infoWriter).maybePrintStandaloneVersion + MaybePrintBuildDate = (*infoWriter).maybePrintBuildDate + MaybePrintLinks = (*infoWriter).maybePrintLinks + MaybePrintBase = (*infoWriter).maybePrintBase + MaybePrintPath = (*infoWriter).maybePrintPath + MaybePrintSum = (*infoWriter).maybePrintSum + MaybePrintCohortKey = (*infoWriter).maybePrintCohortKey + MaybePrintHealth = (*infoWriter).maybePrintHealth + MaybePrintRefreshInfo = (*infoWriter).maybePrintRefreshInfo + WaitWhileInhibited = waitWhileInhibited + NewInhibitionFlow = newInhibitionFlow + ErrSnapRefreshConflict = errSnapRefreshConflict ) func MockPollTime(d time.Duration) (restore func()) { @@ -357,6 +356,14 @@ } } +func MockConfirmSystemdAppTracking(fn func(securityTag string) error) (restore func()) { + old := cgroupConfirmSystemdAppTracking + cgroupConfirmSystemdAppTracking = fn + return func() { + cgroupConfirmSystemdAppTracking = old + } +} + func MockApparmorSnapAppFromPid(f func(pid int) (string, string, string, error)) (restore func()) { old := apparmorSnapAppFromPid apparmorSnapAppFromPid = f @@ -382,10 +389,10 @@ } func MockIoutilTempDir(f func(string, string) (string, error)) (restore func()) { - old := ioutilTempDir - ioutilTempDir = f + old := osMkdirTemp + osMkdirTemp = f return func() { - ioutilTempDir = old + osMkdirTemp = old } } @@ -427,37 +434,13 @@ return restore } -func MockIsGraphicalSession(graphical bool) (restore func()) { - old := isGraphicalSession - isGraphicalSession = func() bool { - return graphical - } - return func() { - isGraphicalSession = old - } -} - -func MockPendingRefreshNotification(f func(ctx context.Context, refreshInfo *usersessionclient.PendingSnapRefreshInfo) error) (restore func()) { - old := pendingRefreshNotification - pendingRefreshNotification = f - return func() { - pendingRefreshNotification = old - } -} - -func MockFinishRefreshNotification(f func(ctx context.Context, refreshInfo *usersessionclient.FinishedSnapRefreshInfo) error) (restore func()) { - old := finishRefreshNotification - finishRefreshNotification = f - return func() { - finishRefreshNotification = old +func MockInhibitionFlow(flow inhibitionFlow) (restore func()) { + old := newInhibitionFlow + newInhibitionFlow = func(cli *client.Client, name string) inhibitionFlow { + return flow } -} - -func MockTryNotifyRefreshViaSnapDesktopIntegrationFlow(f func(ctx context.Context, snapName string) bool) (restore func()) { - old := tryNotifyRefreshViaSnapDesktopIntegrationFlow - tryNotifyRefreshViaSnapDesktopIntegrationFlow = f return func() { - tryNotifyRefreshViaSnapDesktopIntegrationFlow = old + newInhibitionFlow = old } } diff -Nru snapd-2.62+23.10/cmd/snap/inhibit.go snapd-2.63+23.10/cmd/snap/inhibit.go --- snapd-2.62+23.10/cmd/snap/inhibit.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/inhibit.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,29 +21,76 @@ import ( "context" + "errors" "fmt" - "os" "time" - "github.com/godbus/dbus" + "github.com/snapcore/snapd/client" "github.com/snapcore/snapd/cmd/snaplock/runinhibit" - "github.com/snapcore/snapd/dbusutil" + "github.com/snapcore/snapd/features" "github.com/snapcore/snapd/i18n" 
"github.com/snapcore/snapd/logger" - "github.com/snapcore/snapd/usersession/client" + "github.com/snapcore/snapd/osutil" + "github.com/snapcore/snapd/snap" ) var runinhibitWaitWhileInhibited = runinhibit.WaitWhileInhibited -func waitWhileInhibited(ctx context.Context, snapName string) error { - flow := newInhibitionFlow(snapName) +// errSnapRefreshConflict indicates that a retry is needed because snap-run +// might have started without a hint lock file and now there is an ongoing refresh +// which could alter the current snap revision. +var errSnapRefreshConflict = fmt.Errorf("snap refresh conflict detected") + +// maybeWaitWhileInhibited is a wrapper for waitWhileInhibited that skips waiting +// if refresh-app-awareness flag is disabled. +func maybeWaitWhileInhibited(ctx context.Context, cli *client.Client, snapName string, appName string) (info *snap.Info, app *snap.AppInfo, hintFlock *osutil.FileLock, err error) { + // wait only if refresh-app-awareness flag is enabled + if features.RefreshAppAwareness.IsEnabled() { + return waitWhileInhibited(ctx, cli, snapName, appName) + } + + info, app, err = getInfoAndApp(snapName, appName, snap.R(0)) + if err != nil { + return nil, nil, nil, err + } + return info, app, nil, nil +} + +// waitWhileInhibited blocks until snap is not inhibited for refresh anymore and then +// returns a locked hint file lock along with the latest snap and app information. +// If the snap is inhibited for refresh, a notification flow is initiated during +// the inhibition period. +// +// NOTE: A snap without a hint file is considered not inhibited and a nil FileLock is returned. +// +// NOTE: It is the caller's responsibility to release the returned file lock. +func waitWhileInhibited(ctx context.Context, cli *client.Client, snapName string, appName string) (info *snap.Info, app *snap.AppInfo, hintFlock *osutil.FileLock, err error) { + var flow inhibitionFlow notified := false + notInhibited := func(ctx context.Context) (err error) { + // Get updated "current" snap info. + info, app, err = getInfoAndApp(snapName, appName, snap.R(0)) + // We might have started without a hint lock file and we have an + // ongoing refresh which removed current symlink. + if errors.As(err, &snap.NotFoundError{}) { + // Race condition detected + logger.Debugf("%v", err) + return errSnapRefreshConflict + } + return err + } inhibited := func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error) { if !notified { - // wait for HintInhibitedForRefresh set by gate-auto-refresh hook handler - // when it has finished; the hook starts with HintInhibitedGateRefresh lock - // and then either unlocks it or changes to HintInhibitedForRefresh (see - // gateAutoRefreshHookHandler in hooks.go). + flow = newInhibitionFlow(cli, snapName) + info, app, err = getInfoAndApp(snapName, appName, inhibitInfo.Previous) + if err != nil { + return false, err + } + // Don't wait, continue with old revision. + if app.IsService() { + return true, nil + } + // Don't start flow if we are not inhibited for refresh. if hint != runinhibit.HintInhibitedForRefresh { return false, nil } @@ -57,122 +104,96 @@ return false, nil } - hintFlock, err := runinhibitWaitWhileInhibited(ctx, snapName, nil, inhibited, 500*time.Millisecond) + // If the snap is inhibited from being used then postpone running it until + // that condition passes. 
+ hintFlock, err = runinhibitWaitWhileInhibited(ctx, snapName, notInhibited, inhibited, 500*time.Millisecond) if err != nil { // It is fine to return an error here without finishing the notification // flow because we either failed because of it or before it, so it // should not have started in the first place. - return err - } - - // XXX: closing as we don't need it for now, this lock will be used in a later iteration - if hintFlock != nil { - hintFlock.Close() + return nil, nil, nil, err } if notified { if err := flow.FinishInhibitionNotification(ctx); err != nil { - return err + hintFlock.Close() + return nil, nil, nil, err } } - return nil + return info, app, hintFlock, nil } -type inhibitionFlow interface { - StartInhibitionNotification(ctx context.Context) error - FinishInhibitionNotification(ctx context.Context) error -} +func getInfoAndApp(snapName, appName string, rev snap.Revision) (*snap.Info, *snap.AppInfo, error) { + info, err := getSnapInfo(snapName, rev) + if err != nil { + return nil, nil, err + } -var newInhibitionFlow = func(instanceName string) inhibitionFlow { - if isGraphicalSession() { - return &graphicalFlow{instanceName: instanceName} + app, exists := info.Apps[appName] + if !exists { + return nil, nil, fmt.Errorf(i18n.G("cannot find app %q in %q"), appName, snapName) } - return &textFlow{instanceName: instanceName} -} -type textFlow struct { - instanceName string + return info, app, nil } -func (tf *textFlow) StartInhibitionNotification(ctx context.Context) error { - _, err := fmt.Fprintf(Stdout, i18n.G("snap package %q is being refreshed, please wait\n"), tf.instanceName) - // TODO: add proper progress spinner - return err +type inhibitionFlow interface { + StartInhibitionNotification(ctx context.Context) error + FinishInhibitionNotification(ctx context.Context) error } -func (tf *textFlow) FinishInhibitionNotification(ctx context.Context) error { - return nil +var newInhibitionFlow = func(cli *client.Client, instanceName string) inhibitionFlow { + return ¬icesFlow{instanceName: instanceName, cli: cli} } -type graphicalFlow struct { +type noticesFlow struct { instanceName string - notifiedDesktopIntegration bool + cli *client.Client } -func (gf *graphicalFlow) StartInhibitionNotification(ctx context.Context) error { - gf.notifiedDesktopIntegration = tryNotifyRefreshViaSnapDesktopIntegrationFlow(ctx, gf.instanceName) - if gf.notifiedDesktopIntegration { - return nil +func (gf *noticesFlow) StartInhibitionNotification(ctx context.Context) error { + opts := client.NotifyOptions{ + Type: client.SnapRunInhibitNotice, + Key: gf.instanceName, } - - // unable to use snapd-desktop-integration, let's fall back to graphical session flow - refreshInfo := client.PendingSnapRefreshInfo{ - InstanceName: gf.instanceName, - // remaining time = 0 results in "Snap .. is refreshing now" message from - // usersession agent. - TimeRemaining: 0, + _, err := gf.cli.Notify(&opts) + if err != nil { + return err } - return pendingRefreshNotification(ctx, &refreshInfo) -} -func (gf *graphicalFlow) FinishInhibitionNotification(ctx context.Context) error { - if gf.notifiedDesktopIntegration { - // snapd-desktop-integration detects inhibit unlock itself, do nothing - return nil + // Fallback to text notification if marker "snap-refresh-observe" + // interface is not connected and a terminal is detected. 
+ if isStdoutTTY && !markerInterfaceConnected(gf.cli) { + fmt.Fprintf(Stderr, i18n.G("snap package %q is being refreshed, please wait\n"), gf.instanceName) } - // finish graphical session flow - finishRefreshInfo := client.FinishedSnapRefreshInfo{InstanceName: gf.instanceName} - return finishRefreshNotification(ctx, &finishRefreshInfo) + return nil +} + +func (gf *noticesFlow) FinishInhibitionNotification(ctx context.Context) error { + // snapd-desktop-integration (or any other client) should detect that the + // snap is no longer inhibited by itself, do nothing. + return nil } -var tryNotifyRefreshViaSnapDesktopIntegrationFlow = func(ctx context.Context, snapName string) (notified bool) { - // Check if Snapd-Desktop-Integration is available - conn, err := dbusutil.SessionBus() +func markerInterfaceConnected(cli *client.Client) bool { + // Check if marker interface "snap-refresh-observe" is connected. + connOpts := client.ConnectionOptions{ + Interface: "snap-refresh-observe", + } + connections, err := cli.Connections(&connOpts) if err != nil { - logger.Noticef("unable to connect dbus session: %v", err) + // Ignore error (maybe snapd is being updated) and fallback to + // text flow instead. return false } - obj := conn.Object("io.snapcraft.SnapDesktopIntegration", "/io/snapcraft/SnapDesktopIntegration") - extraParams := make(map[string]dbus.Variant) - err = obj.CallWithContext(ctx, "io.snapcraft.SnapDesktopIntegration.ApplicationIsBeingRefreshed", 0, snapName, runinhibit.HintFile(snapName), extraParams).Store() - if err != nil { - logger.Noticef("unable to successfully call io.snapcraft.SnapDesktopIntegration.ApplicationIsBeingRefreshed: %v", err) + if len(connections.Established) == 0 { + // Marker interface is not connected. + // No snap (i.e. snapd-desktop-integration) is listening, let's fallback + // to text flow. return false } return true } - -var isGraphicalSession = func() bool { - // TODO: uncomment once there is a proper UX review - //return os.Getenv("DISPLAY") != "" || os.Getenv("WAYLAND_DISPLAY") != "" - return false -} - -var pendingRefreshNotification = func(ctx context.Context, refreshInfo *client.PendingSnapRefreshInfo) error { - userclient := client.NewForUids(os.Getuid()) - if err := userclient.PendingRefreshNotification(ctx, refreshInfo); err != nil { - return err - } - return nil -} - -var finishRefreshNotification = func(ctx context.Context, refreshInfo *client.FinishedSnapRefreshInfo) error { - userclient := client.NewForUids(os.Getuid()) - if err := userclient.FinishRefreshNotification(ctx, refreshInfo); err != nil { - return err - } - return nil -} diff -Nru snapd-2.62+23.10/cmd/snap/inhibit_test.go snapd-2.63+23.10/cmd/snap/inhibit_test.go --- snapd-2.62+23.10/cmd/snap/inhibit_test.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap/inhibit_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,464 @@ +// -*- Mode: Go; indent-tabs-mode: t -*- + +/* + * Copyright (C) 2024 Canonical Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package main_test + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "time" + + "github.com/snapcore/snapd/client" + snaprun "github.com/snapcore/snapd/cmd/snap" + "github.com/snapcore/snapd/cmd/snaplock/runinhibit" + "github.com/snapcore/snapd/osutil" + "github.com/snapcore/snapd/snap" + "github.com/snapcore/snapd/snap/snaptest" + "github.com/snapcore/snapd/testutil" + "gopkg.in/check.v1" + . "gopkg.in/check.v1" +) + +type fakeInhibitionFlow struct { + start func(ctx context.Context) error + finish func(ctx context.Context) error +} + +func (flow *fakeInhibitionFlow) StartInhibitionNotification(ctx context.Context) error { + if flow.start == nil { + return fmt.Errorf("StartInhibitionNotification is not implemented") + } + return flow.start(ctx) +} + +func (flow *fakeInhibitionFlow) FinishInhibitionNotification(ctx context.Context) error { + if flow.finish == nil { + return fmt.Errorf("FinishInhibitionNotification is not implemented") + } + return flow.finish(ctx) +} + +func (s *RunSuite) TestWaitWhileInhibitedRunThrough(c *C) { + // mock installed snap + snaptest.MockSnapCurrent(c, string(mockYaml), &snap.SideInfo{Revision: snap.R(11)}) + + inhibitInfo := runinhibit.InhibitInfo{Previous: snap.R(11)} + c.Assert(runinhibit.LockWithHint("snapname", runinhibit.HintInhibitedForRefresh, inhibitInfo), IsNil) + + var waitWhileInhibitedCalled int + restore := snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { + waitWhileInhibitedCalled++ + + c.Check(snapName, Equals, "snapname") + c.Check(ctx, NotNil) + for i := 0; i < 3; i++ { + cont, err := inhibited(ctx, runinhibit.HintInhibitedForRefresh, &inhibitInfo) + c.Assert(err, IsNil) + // non-service apps should keep waiting + c.Check(cont, Equals, false) + } + err := notInhibited(ctx) + c.Assert(err, IsNil) + + flock, err = openHintFileLock(snapName) + c.Assert(err, IsNil) + err = flock.ReadLock() + c.Assert(err, IsNil) + return flock, nil + }) + defer restore() + + var startCalled, finishCalled int + inhibitionFlow := fakeInhibitionFlow{ + start: func(ctx context.Context) error { + startCalled++ + return nil + }, + finish: func(ctx context.Context) error { + finishCalled++ + return nil + }, + } + restore = snaprun.MockInhibitionFlow(&inhibitionFlow) + defer restore() + + info, app, hintLock, err := snaprun.WaitWhileInhibited(context.TODO(), snaprun.Client(), "snapname", "app") + defer hintLock.Unlock() + c.Assert(err, IsNil) + c.Check(info.InstanceName(), Equals, "snapname") + c.Check(app.Name, Equals, "app") + + c.Check(startCalled, Equals, 1) + c.Check(finishCalled, Equals, 1) + c.Check(waitWhileInhibitedCalled, Equals, 1) + checkHintFileLocked(c, "snapname") +} + +func (s *RunSuite) TestWaitWhileInhibitedErrorOnStartNotification(c *C) { + // mock installed snap + snaptest.MockSnapCurrent(c, string(mockYaml), &snap.SideInfo{Revision: snap.R(11)}) + + inhibitInfo := runinhibit.InhibitInfo{Previous: snap.R(11)} + c.Assert(runinhibit.LockWithHint("snapname", runinhibit.HintInhibitedForRefresh, inhibitInfo), IsNil) + + var startCalled, finishCalled int + inhibitionFlow := fakeInhibitionFlow{ + start: func(ctx context.Context) error { + startCalled++ + return fmt.Errorf("boom") + }, + finish: func(ctx context.Context) error { + finishCalled++ + 
return nil + }, + } + restore := snaprun.MockInhibitionFlow(&inhibitionFlow) + defer restore() + + info, app, hintLock, err := snaprun.WaitWhileInhibited(context.TODO(), snaprun.Client(), "snapname", "app") + c.Assert(err, ErrorMatches, "boom") + c.Check(info, IsNil) + c.Check(app, IsNil) + c.Check(hintLock, IsNil) + + c.Check(startCalled, Equals, 1) + c.Check(finishCalled, Equals, 0) + // lock must be released + checkHintFileNotLocked(c, "snapname") +} + +func (s *RunSuite) TestWaitWhileInhibitedErrorOnFinishNotification(c *C) { + // mock installed snap + snaptest.MockSnapCurrent(c, string(mockYaml), &snap.SideInfo{Revision: snap.R(11)}) + + inhibitInfo := runinhibit.InhibitInfo{Previous: snap.R(11)} + c.Assert(runinhibit.LockWithHint("snapname", runinhibit.HintInhibitedForRefresh, inhibitInfo), IsNil) + + var waitWhileInhibitedCalled int + restore := snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { + waitWhileInhibitedCalled++ + + c.Check(snapName, Equals, "snapname") + c.Check(ctx, NotNil) + for i := 0; i < 3; i++ { + cont, err := inhibited(ctx, runinhibit.HintInhibitedForRefresh, &inhibitInfo) + c.Assert(err, IsNil) + // non-service apps should keep waiting + c.Check(cont, Equals, false) + } + err := notInhibited(ctx) + c.Assert(err, IsNil) + + flock, err = openHintFileLock(snapName) + c.Assert(err, IsNil) + err = flock.ReadLock() + c.Assert(err, IsNil) + return flock, nil + }) + defer restore() + + var startCalled, finishCalled int + inhibitionFlow := fakeInhibitionFlow{ + start: func(ctx context.Context) error { + startCalled++ + return nil + }, + finish: func(ctx context.Context) error { + finishCalled++ + return fmt.Errorf("boom") + }, + } + restore = snaprun.MockInhibitionFlow(&inhibitionFlow) + defer restore() + + info, app, hintLock, err := snaprun.WaitWhileInhibited(context.TODO(), snaprun.Client(), "snapname", "app") + c.Assert(err, ErrorMatches, "boom") + c.Check(info, IsNil) + c.Check(app, IsNil) + c.Check(hintLock, IsNil) + + c.Check(startCalled, Equals, 1) + c.Check(finishCalled, Equals, 1) + c.Check(waitWhileInhibitedCalled, Equals, 1) + // lock must be released + checkHintFileNotLocked(c, "snapname") +} + +func (s *RunSuite) TestWaitWhileInhibitedContextCancellationOnError(c *C) { + // mock installed snap + snaptest.MockSnapCurrent(c, string(mockYaml), &snap.SideInfo{Revision: snap.R(11)}) + + inhibitInfo := runinhibit.InhibitInfo{Previous: snap.R(11)} + c.Assert(runinhibit.LockWithHint("snapname", runinhibit.HintInhibitedForRefresh, inhibitInfo), IsNil) + + originalCtx, cancel := context.WithCancel(context.Background()) + inhibitionFlow := fakeInhibitionFlow{ + start: func(ctx context.Context) error { + // check context is propagated properly + c.Assert(ctx, Equals, originalCtx) + c.Check(ctx.Err(), IsNil) + // cancel context to trigger cancellation error + cancel() + return nil + }, + finish: func(ctx context.Context) error { + return fmt.Errorf("this should never be reached") + }, + } + restore := snaprun.MockInhibitionFlow(&inhibitionFlow) + defer restore() + + _, _, _, err := snaprun.WaitWhileInhibited(originalCtx, snaprun.Client(), "snapname", "app") + c.Assert(err, ErrorMatches, "context canceled") + c.Assert(errors.Is(err, context.Canceled), Equals, true) + c.Assert(errors.Is(originalCtx.Err(), context.Canceled), 
Equals, true) +} + +func (s *RunSuite) TestWaitWhileInhibitedGateRefreshNoNotification(c *C) { + // mock installed snap + snaptest.MockSnapCurrent(c, string(mockYaml), &snap.SideInfo{Revision: snap.R(11)}) + + inhibitInfo := runinhibit.InhibitInfo{Previous: snap.R(11)} + c.Assert(runinhibit.LockWithHint("snapname", runinhibit.HintInhibitedGateRefresh, inhibitInfo), IsNil) + + var called int + restore := snaprun.MockWaitWhileInhibited(func(ctx context.Context, snapName string, notInhibited func(ctx context.Context) error, inhibited func(ctx context.Context, hint runinhibit.Hint, inhibitInfo *runinhibit.InhibitInfo) (cont bool, err error), interval time.Duration) (flock *osutil.FileLock, retErr error) { + called++ + + c.Check(snapName, Equals, "snapname") + c.Check(ctx, NotNil) + for i := 0; i < 3; i++ { + cont, err := inhibited(ctx, runinhibit.HintInhibitedGateRefresh, &inhibitInfo) + c.Assert(err, IsNil) + // non-service apps should keep waiting + c.Check(cont, Equals, false) + } + err := notInhibited(ctx) + c.Assert(err, IsNil) + + flock, err = openHintFileLock(snapName) + c.Assert(err, IsNil) + err = flock.ReadLock() + c.Assert(err, IsNil) + return flock, nil + }) + defer restore() + + inhibitionFlow := fakeInhibitionFlow{ + start: func(ctx context.Context) error { + return fmt.Errorf("this should never be reached") + }, + finish: func(ctx context.Context) error { + return fmt.Errorf("this should never be reached") + }, + } + restore = snaprun.MockInhibitionFlow(&inhibitionFlow) + defer restore() + + info, app, hintLock, err := snaprun.WaitWhileInhibited(context.TODO(), snaprun.Client(), "snapname", "app") + defer hintLock.Unlock() + c.Assert(err, IsNil) + c.Check(info.InstanceName(), Equals, "snapname") + c.Check(app.Name, Equals, "app") + + c.Check(called, Equals, 1) + checkHintFileLocked(c, "snapname") +} + +func (s *RunSuite) TestWaitWhileInhibitedNotInhibitedNoNotification(c *C) { + // mock installed snap + snaptest.MockSnapCurrent(c, string(mockYaml), &snap.SideInfo{Revision: snap.R(11)}) + + inhibitionFlow := fakeInhibitionFlow{ + start: func(ctx context.Context) error { + return fmt.Errorf("this should never be reached") + }, + finish: func(ctx context.Context) error { + return fmt.Errorf("this should never be reached") + }, + } + restore := snaprun.MockInhibitionFlow(&inhibitionFlow) + defer restore() + + info, app, hintLock, err := snaprun.WaitWhileInhibited(context.TODO(), snaprun.Client(), "snapname", "app") + c.Assert(err, IsNil) + c.Assert(hintLock, IsNil) + c.Check(info.InstanceName(), Equals, "snapname") + c.Check(app.Name, Equals, "app") + + c.Check(runinhibit.HintFile("snapname"), testutil.FileAbsent) +} + +func (s *RunSuite) TestWaitWhileInhibitedNotInhibitHintFileOngoingRefresh(c *C) { + inhibitionFlow := fakeInhibitionFlow{ + start: func(ctx context.Context) error { + return fmt.Errorf("this should never be reached") + }, + finish: func(ctx context.Context) error { + return fmt.Errorf("this should never be reached") + }, + } + restore := snaprun.MockInhibitionFlow(&inhibitionFlow) + defer restore() + + _, _, hintLock, err := snaprun.WaitWhileInhibited(context.TODO(), snaprun.Client(), "snapname", "app") + c.Assert(err, testutil.ErrorIs, snaprun.ErrSnapRefreshConflict) + c.Assert(hintLock, IsNil) +} + +func (s *RunSuite) TestInhibitionFlow(c *C) { + restore := snaprun.MockIsStdoutTTY(true) + defer restore() + + var noticeCreated int + s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/v2/connections": + 
c.Assert(r.Method, check.Equals, "GET") + c.Check(r.URL.Query(), check.DeepEquals, url.Values{"interface": []string{"snap-refresh-observe"}}) + body, err := io.ReadAll(r.Body) + c.Assert(err, check.IsNil) + c.Check(body, check.DeepEquals, []byte{}) + EncodeResponseBody(c, w, map[string]any{ + "type": "sync", + "result": client.Connections{ + // mock snap exists with connected marker interface + Established: []client.Connection{{Interface: "snap-refresh-observe"}}, + }, + }) + case "/v2/notices": + noticeCreated++ + c.Assert(r.Method, check.Equals, "POST") + body, err := io.ReadAll(r.Body) + c.Assert(err, check.IsNil) + var noticeRequest map[string]string + c.Assert(json.Unmarshal(body, ¬iceRequest), check.IsNil) + c.Check(noticeRequest["action"], check.Equals, "add") + c.Check(noticeRequest["type"], check.Equals, "snap-run-inhibit") + c.Check(noticeRequest["key"], check.Equals, "some-snap") + EncodeResponseBody(c, w, map[string]any{ + "type": "sync", + "result": map[string]string{"id": "1"}, + }) + default: + c.Error("this should never be reached") + } + }) + + graphicalFlow := snaprun.NewInhibitionFlow(snaprun.Client(), "some-snap") + + c.Assert(graphicalFlow.StartInhibitionNotification(context.TODO()), IsNil) + // A snap-run-inhibit notice is always created + c.Check(noticeCreated, check.Equals, 1) + c.Check(s.Stderr(), Equals, "") + + c.Assert(graphicalFlow.FinishInhibitionNotification(context.TODO()), IsNil) + // Finish is no-op, no new notices + c.Check(noticeCreated, check.Equals, 1) + c.Check(s.Stderr(), Equals, "") +} + +func (s *RunSuite) testInhibitionFlowTextFallback(c *C, connectionsAPIErr bool) { + restore := snaprun.MockIsStdoutTTY(true) + defer restore() + + s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/v2/connections": + if connectionsAPIErr { + w.WriteHeader(500) + EncodeResponseBody(c, w, map[string]any{"type": "error"}) + } else { + EncodeResponseBody(c, w, map[string]any{"type": "sync", "result": nil}) + + } + case "/v2/notices": + EncodeResponseBody(c, w, map[string]any{"type": "sync", "result": map[string]string{"id": "1"}}) + default: + c.Error("this should never be reached") + } + }) + + graphicalFlow := snaprun.NewInhibitionFlow(snaprun.Client(), "some-snap") + + c.Assert(graphicalFlow.StartInhibitionNotification(context.TODO()), IsNil) + c.Check(s.Stderr(), Equals, "snap package \"some-snap\" is being refreshed, please wait\n") + + c.Assert(graphicalFlow.FinishInhibitionNotification(context.TODO()), IsNil) + // Finish is a noop + c.Check(s.Stderr(), Equals, "snap package \"some-snap\" is being refreshed, please wait\n") +} + +func (s *RunSuite) TestInhibitionFlowTextFallbackNoMarkerInterface(c *C) { + const connectionsAPIErr = false + s.testInhibitionFlowTextFallback(c, connectionsAPIErr) +} + +func (s *RunSuite) TestInhibitionFlowTextFallbackConnectionsAPIError(c *C) { + const connectionsAPIErr = true + s.testInhibitionFlowTextFallback(c, connectionsAPIErr) +} + +func (s *RunSuite) TestInhibitionFlowNoTTY(c *C) { + restore := snaprun.MockIsStdoutTTY(false) + defer restore() + + s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/v2/connections": + // No marker interface connected + EncodeResponseBody(c, w, map[string]any{"type": "sync", "result": nil}) + case "/v2/notices": + EncodeResponseBody(c, w, map[string]any{"type": "sync", "result": map[string]string{"id": "1"}}) + default: + c.Error("this should never be reached") + } + }) + + graphicalFlow 
:= snaprun.NewInhibitionFlow(snaprun.Client(), "some-snap") + + c.Assert(graphicalFlow.StartInhibitionNotification(context.TODO()), IsNil) + // No TTY, no text notification + c.Check(s.Stderr(), Equals, "") + + c.Assert(graphicalFlow.FinishInhibitionNotification(context.TODO()), IsNil) + // No TTY, no text notification + c.Check(s.Stderr(), Equals, "") +} + +func (s *RunSuite) TestInhibitionFlowError(c *C) { + s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/v2/notices": + c.Assert(r.Method, check.Equals, "POST") + w.WriteHeader(500) + EncodeResponseBody(c, w, map[string]any{"type": "error"}) + default: + c.Error("this should never be reached") + } + }) + + graphicalFlow := snaprun.NewInhibitionFlow(snaprun.Client(), "some-snap") + c.Assert(graphicalFlow.StartInhibitionNotification(context.TODO()), ErrorMatches, `server error: "Internal Server Error"`) +} diff -Nru snapd-2.62+23.10/cmd/snap-bootstrap/cmd_initramfs_mounts.go snapd-2.63+23.10/cmd/snap-bootstrap/cmd_initramfs_mounts.go --- snapd-2.62+23.10/cmd/snap-bootstrap/cmd_initramfs_mounts.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-bootstrap/cmd_initramfs_mounts.go 2024-04-24 00:00:39.000000000 +0000 @@ -25,7 +25,6 @@ "encoding/json" "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -373,7 +372,7 @@ return err } preseedSeed, ok := currentSeed.(seed.PreseedCapable) - if ok { + if ok && preseedSeed.HasArtifact("preseed.tgz") { runMode := false if err := installApplyPreseededData(preseedSeed, boot.InitramfsWritableDir(model, runMode)); err != nil { return err @@ -1182,7 +1181,7 @@ // to get to this state, we needed to have mounted ubuntu-data on host, so // if encrypted, we can try to read the run key from host ubuntu-data saveKey := device.SaveKeyUnder(dirs.SnapFDEDirUnder(boot.InitramfsHostWritableDir(m.model))) - key, err := ioutil.ReadFile(saveKey) + key, err := os.ReadFile(saveKey) if err != nil { // log the error and skip to trying the fallback key m.degradedState.LogErrorf("cannot access run ubuntu-save key: %v", err) @@ -1774,7 +1773,7 @@ return false, fmt.Errorf("cannot find ubuntu-save encryption key at %v", saveKey) } // we have save.key, volume exists and is encrypted - key, err := ioutil.ReadFile(saveKey) + key, err := os.ReadFile(saveKey) if err != nil { return true, err } diff -Nru snapd-2.62+23.10/cmd/snap-bootstrap/cmd_initramfs_mounts_test.go snapd-2.63+23.10/cmd/snap-bootstrap/cmd_initramfs_mounts_test.go --- snapd-2.62+23.10/cmd/snap-bootstrap/cmd_initramfs_mounts_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-bootstrap/cmd_initramfs_mounts_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "bytes" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "runtime" @@ -3774,7 +3773,7 @@ } func checkDegradedJSON(c *C, name string, exp map[string]interface{}) { - b, err := ioutil.ReadFile(filepath.Join(dirs.SnapBootstrapRunDir, name)) + b, err := os.ReadFile(filepath.Join(dirs.SnapBootstrapRunDir, name)) c.Assert(err, IsNil) degradedJSONObj := make(map[string]interface{}) err = json.Unmarshal(b, °radedJSONObj) @@ -8229,7 +8228,7 @@ {"fde-setup"}, }) - fdeSetupInput, err := ioutil.ReadFile(filepath.Join(s.tmpDir, "fde-setup.input")) + fdeSetupInput, err := os.ReadFile(filepath.Join(s.tmpDir, "fde-setup.input")) c.Assert(err, IsNil) c.Assert(fdeSetupInput, DeepEquals, []byte(`{"op":"features"}`)) diff -Nru snapd-2.62+23.10/cmd/snap-bootstrap/triggerwatch/triggerwatch_test.go 
snapd-2.63+23.10/cmd/snap-bootstrap/triggerwatch/triggerwatch_test.go --- snapd-2.62+23.10/cmd/snap-bootstrap/triggerwatch/triggerwatch_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-bootstrap/triggerwatch/triggerwatch_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,6 +22,7 @@ import ( "errors" "fmt" + "sync" "testing" "time" @@ -39,23 +40,33 @@ var _ = Suite(&triggerwatchSuite{}) type mockTriggerDevice struct { + sync.Mutex + waitForTriggerCalls int closeCalls int ev *triggerwatch.KeyEvent } func (m *mockTriggerDevice) WaitForTrigger(n chan triggerwatch.KeyEvent) { - m.waitForTriggerCalls++ - if m.ev != nil { - ev := *m.ev - ev.Dev = m - n <- ev - } + m.withLocked(func() { + m.waitForTriggerCalls++ + if m.ev != nil { + ev := *m.ev + ev.Dev = m + n <- ev + } + }) } func (m *mockTriggerDevice) String() string { return "mock-device" } func (m *mockTriggerDevice) Close() { m.closeCalls++ } +func (m *mockTriggerDevice) withLocked(f func()) { + m.Lock() + defer m.Unlock() + f() +} + type mockTrigger struct { f triggerwatch.TriggerCapabilityFilter d *mockTriggerDevice @@ -115,8 +126,10 @@ err := triggerwatch.Wait(testTriggerTimeout, testDeviceTimeout) c.Assert(err, Equals, triggerwatch.ErrTriggerNotDetected) c.Assert(mi.findMatchingCalls, Equals, 1) - c.Assert(md.waitForTriggerCalls, Equals, 1) - c.Assert(md.closeCalls, Equals, 1) + md.withLocked(func() { + c.Assert(md.waitForTriggerCalls, Equals, 1) + c.Assert(md.closeCalls, Equals, 1) + }) } func (s *triggerwatchSuite) TestNoDevsWaitNoMatching(c *C) { @@ -177,8 +190,10 @@ c.Assert(mi.findMatchingCalls, Equals, 1) c.Assert(mi.openCalls, Equals, 1) - c.Assert(md.waitForTriggerCalls, Equals, 1) - c.Assert(md.closeCalls, Equals, 1) + md.withLocked(func() { + c.Assert(md.waitForTriggerCalls, Equals, 1) + c.Assert(md.closeCalls, Equals, 1) + }) } func (s *triggerwatchSuite) TestUdevEventNoKeyEvent(c *C) { @@ -213,6 +228,8 @@ c.Assert(mi.findMatchingCalls, Equals, 1) c.Assert(mi.openCalls, Equals, 1) - c.Assert(md.waitForTriggerCalls, Equals, 1) - c.Assert(md.closeCalls, Equals, 1) + md.withLocked(func() { + c.Assert(md.waitForTriggerCalls, Equals, 1) + c.Assert(md.closeCalls, Equals, 1) + }) } diff -Nru snapd-2.62+23.10/cmd/snap-confine/mount-support.c snapd-2.63+23.10/cmd/snap-confine/mount-support.c --- snapd-2.62+23.10/cmd/snap-confine/mount-support.c 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-confine/mount-support.c 2024-04-24 00:00:39.000000000 +0000 @@ -718,32 +718,34 @@ // Ensure that hostfs exists and is group-owned by root. We may have (now // or earlier) created the directory as the user who first ran a snap on a // given system and the group identity of that user is visible on disk. - // This was LP:#1665004 - struct stat sb; - if (stat(SC_HOSTFS_DIR, &sb) < 0) { - if (errno == ENOENT) { - // Create the hostfs directory if one is missing. This directory is a part - // of packaging now so perhaps this code can be removed later. - // Note: we use 0000 as permissions here, to avoid the risk that - // the user manages to fiddle with the newly created directory - // before we have the chance to chown it to root:root. We are - // setting the usual 0755 permissions just after the chown below. 
- if (mkdir(SC_HOSTFS_DIR, 0000) < 0) { - die("cannot perform operation: mkdir %s", - SC_HOSTFS_DIR); - } - if (chown(SC_HOSTFS_DIR, 0, 0) < 0) { - die("cannot set root ownership on %s directory", - SC_HOSTFS_DIR); - } - if (chmod(SC_HOSTFS_DIR, 0755) < 0) { - die("cannot set 0755 permissions on %s directory", SC_HOSTFS_DIR); + // This was LP:#1665004. We do this by trying to create the hostfs directory + // if one is missing. This directory is a part of packaging now so perhaps + // this code can be removed later. Note: we use 0000 as permissions here, to + // avoid the risk that the user manages to fiddle with the newly created + // directory before we have the chance to chown it to root:root. We are + // setting the usual 0755 permissions just after the chown below. + if (mkdir(SC_HOSTFS_DIR, 0000) < 0) { + if (errno == EEXIST) { + // The directory exists, verify its ownership. + struct stat sb; + if (stat(SC_HOSTFS_DIR, &sb) < 0) { + die("cannot stat %s", SC_HOSTFS_DIR); + } else if (sb.st_uid != 0 || sb.st_gid != 0) { + die("%s is not owned by root", SC_HOSTFS_DIR); } } else { - die("cannot stat %s", SC_HOSTFS_DIR); + die("cannot perform operation: mkdir %s", + SC_HOSTFS_DIR); + } + } else { + if (chown(SC_HOSTFS_DIR, 0, 0) < 0) { + die("cannot set root ownership on %s directory", + SC_HOSTFS_DIR); + } + if (chmod(SC_HOSTFS_DIR, 0755) < 0) { + die("cannot set 0755 permissions on %s directory", + SC_HOSTFS_DIR); } - } else if (sb.st_uid != 0 || sb.st_gid != 0) { - die("%s is not owned by root", SC_HOSTFS_DIR); } // Make the upcoming "put_old" directory for pivot_root private so that // mount events don't propagate to any peer group. In practice pivot root diff -Nru snapd-2.62+23.10/cmd/snap-confine/seccomp-support-test.c snapd-2.63+23.10/cmd/snap-confine/seccomp-support-test.c --- snapd-2.62+23.10/cmd/snap-confine/seccomp-support-test.c 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-confine/seccomp-support-test.c 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,193 @@ +/* + * Copyright (C) 2024 Canonical Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +#include "seccomp-support-ext.c" +#include "seccomp-support.c" + +#include +#include + +static void make_seccomp_profile(struct sc_seccomp_file_header *hdr, int *fd, + char **fname) +{ + *fd = g_file_open_tmp(NULL, fname, NULL); + g_assert_true(*fd > 0); + int written = write(*fd, hdr, sizeof(struct sc_seccomp_file_header)); + g_assert_true(written == sizeof(struct sc_seccomp_file_header)); +} + +static void test_must_read_and_validate_header_from_file__happy(void) +{ + struct sc_seccomp_file_header hdr = { + .header[0] = 'S', + .header[1] = 'C', + .version = 1, + }; + char SC_CLEANUP(sc_cleanup_string) * profile = NULL; + int SC_CLEANUP(sc_cleanup_close) fd = 0; + make_seccomp_profile(&hdr, &fd, &profile); + + FILE *file SC_CLEANUP(sc_cleanup_file) = fopen(profile, "rb"); + sc_must_read_and_validate_header_from_file(file, profile, &hdr); + g_assert_true(file != NULL); +} + +static void test_must_read_and_validate_header_from_file__missing_file(void) +{ + struct sc_seccomp_file_header hdr; + const char *profile = "/path/to/missing/file"; + const char *expected_err = + "cannot open seccomp filter /path/to/missing/file: No such file or directory\n"; + + if (g_test_subprocess()) { + FILE *file SC_CLEANUP(sc_cleanup_file) = fopen(profile, "rb"); + sc_must_read_and_validate_header_from_file(file, profile, &hdr); + // the function above is expected to call die() + g_assert_not_reached(); + // reference "file" to keep the compiler from warning + // that "file" is unused + g_assert_null(file); + } + g_test_trap_subprocess(NULL, 0, 0); + g_test_trap_assert_failed(); + g_test_trap_assert_stderr(expected_err); +} + +static void must_read_and_validate_header_from_file_dies_with(struct + sc_seccomp_file_header + hdr, const char + *err_msg) +{ + if (g_test_subprocess()) { + char SC_CLEANUP(sc_cleanup_string) * profile = NULL; + int SC_CLEANUP(sc_cleanup_close) fd = 0; + make_seccomp_profile(&hdr, &fd, &profile); + + FILE *file SC_CLEANUP(sc_cleanup_file) = fopen(profile, "rb"); + sc_must_read_and_validate_header_from_file(file, profile, &hdr); + // the function above is expected to call die() + g_assert_not_reached(); + // reference "file" to keep the compiler from warning + // that "file" is unused + g_assert_null(file); + } + + g_test_trap_subprocess(NULL, 0, 0); + g_test_trap_assert_failed(); + g_test_trap_assert_stderr(err_msg); +} + +static void test_must_read_and_validate_header_from_file__invalid_header(void) +{ + // when we stop supporting 14.04 we could just use hdr = {0} + struct sc_seccomp_file_header hdr; + memset(&hdr, 0, sizeof hdr); + const char *expected_err = "unexpected seccomp header: 00\n"; + must_read_and_validate_header_from_file_dies_with(hdr, expected_err); +} + +static void test_must_read_and_validate_header_from_file__invalid_version(void) +{ + struct sc_seccomp_file_header hdr = { + .header[0] = 'S', + .header[1] = 'C', + .version = 0, + }; + const char *expected_err = "unexpected seccomp file version: 0\n"; + must_read_and_validate_header_from_file_dies_with(hdr, expected_err); +} + +static void +test_must_read_and_validate_header_from_file__len_allow_too_big(void) +{ + struct sc_seccomp_file_header hdr = { + .header[0] = 'S', + .header[1] = 'C', + .version = 1, + .len_allow_filter = MAX_BPF_SIZE + 1, + }; + const char *expected_err = "allow filter size too big 32769\n"; + must_read_and_validate_header_from_file_dies_with(hdr, expected_err); +} + +static void +test_must_read_and_validate_header_from_file__len_allow_no_multiplier(void) +{ + struct 
sc_seccomp_file_header hdr = { + .header[0] = 'S', + .header[1] = 'C', + .version = 1, + .len_allow_filter = sizeof(struct sock_filter) + 1, + }; + const char *expected_err = + "allow filter size not multiple of sock_filter\n"; + must_read_and_validate_header_from_file_dies_with(hdr, expected_err); +} + +static void test_must_read_and_validate_header_from_file__len_deny_too_big(void) +{ + struct sc_seccomp_file_header hdr = { + .header[0] = 'S', + .header[1] = 'C', + .version = 1, + .len_deny_filter = MAX_BPF_SIZE + 1, + }; + const char *expected_err = "deny filter size too big 32769\n"; + must_read_and_validate_header_from_file_dies_with(hdr, expected_err); +} + +static void +test_must_read_and_validate_header_from_file__len_deny_no_multiplier(void) +{ + struct sc_seccomp_file_header hdr = { + .header[0] = 'S', + .header[1] = 'C', + .version = 1, + .len_deny_filter = sizeof(struct sock_filter) + 1, + }; + const char *expected_err = + "deny filter size not multiple of sock_filter\n"; + must_read_and_validate_header_from_file_dies_with(hdr, expected_err); +} + +static void __attribute__((constructor)) init(void) +{ + g_test_add_func + ("/seccomp/must_read_and_validate_header_from_file/happy", + test_must_read_and_validate_header_from_file__happy); + g_test_add_func + ("/seccomp/must_read_and_validate_header_from_file/missing_file", + test_must_read_and_validate_header_from_file__missing_file); + g_test_add_func + ("/seccomp/must_read_and_validate_header_from_file/invalid_header", + test_must_read_and_validate_header_from_file__invalid_header); + g_test_add_func + ("/seccomp/must_read_and_validate_header_from_file/invalid_version", + test_must_read_and_validate_header_from_file__invalid_version); + g_test_add_func + ("/seccomp/must_read_and_validate_header_from_file/len_allow_too_big", + test_must_read_and_validate_header_from_file__len_allow_too_big); + g_test_add_func + ("/seccomp/must_read_and_validate_header_from_file/len_allow_no_multiplier", + test_must_read_and_validate_header_from_file__len_allow_no_multiplier); + g_test_add_func + ("/seccomp/must_read_and_validate_header_from_file/len_deny_too_big", + test_must_read_and_validate_header_from_file__len_deny_too_big); + g_test_add_func + ("/seccomp/must_read_and_validate_header_from_file/len_deny_no_multiplier", + test_must_read_and_validate_header_from_file__len_deny_no_multiplier); +} diff -Nru snapd-2.62+23.10/cmd/snap-confine/seccomp-support.c snapd-2.63+23.10/cmd/snap-confine/seccomp-support.c --- snapd-2.62+23.10/cmd/snap-confine/seccomp-support.c 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-confine/seccomp-support.c 2024-04-24 00:00:39.000000000 +0000 @@ -17,11 +17,13 @@ #include "config.h" #include "seccomp-support.h" +#include #include #include #include #include #include +#include #include #include #include @@ -45,6 +47,30 @@ typedef struct sock_filter bpf_instr; +// Keep in sync with snap-seccomp/main.go +// +// Header of a seccomp.bin2 filter file in native byte order. 
+struct __attribute__((__packed__)) sc_seccomp_file_header { + // header: "SC" + char header[2]; + // version: 0x1 + uint8_t version; + // flags + uint8_t unrestricted; + // unused + uint8_t padding[4]; + + // size of allow filter in byte + uint32_t len_allow_filter; + // size of deny filter in byte + uint32_t len_deny_filter; + // reserved for future use + uint8_t reserved2[112]; +}; + +static_assert(sizeof(struct sc_seccomp_file_header) == 128, + "unexpected struct size"); + static void validate_path_has_strict_perms(const char *path) { struct stat stat_buf; @@ -109,12 +135,94 @@ } } +static void sc_cleanup_sock_fprog(struct sock_fprog *prog) +{ + free(prog->filter); + prog->filter = NULL; +} + +static void sc_must_read_filter_from_file(FILE *file, uint32_t len_bytes, + char *what, struct sock_fprog *prog) +{ + if (len_bytes == 0) { + die("%s filter may only be empty in unrestricted profiles", + what); + } + prog->len = len_bytes / sizeof(struct sock_filter); + prog->filter = malloc(len_bytes); + if (prog->filter == NULL) { + die("cannot allocate %u bytes of memory for %s seccomp filter ", + len_bytes, what); + } + size_t num_read = + fread(prog->filter, 1, prog->len * sizeof(struct sock_filter), + file); + if (ferror(file)) { + die("cannot read %s filter", what); + } + if (num_read != len_bytes) { + die("short read for filter %s %zu != %i", what, num_read, + len_bytes); + } +} + +static void sc_must_read_and_validate_header_from_file(FILE *file, + const char *profile_path, + struct + sc_seccomp_file_header + *hdr) +{ + if (file == NULL) { + die("cannot open seccomp filter %s", profile_path); + } + size_t num_read = + fread(hdr, 1, sizeof(struct sc_seccomp_file_header), file); + if (ferror(file) != 0) { + die("cannot read seccomp profile %s", profile_path); + } + if (num_read < sizeof(struct sc_seccomp_file_header)) { + die("short read on seccomp header: %zu", num_read); + } + if (hdr->header[0] != 'S' || hdr->header[1] != 'C') { + die("unexpected seccomp header: %x%x", hdr->header[0], + hdr->header[1]); + } + if (hdr->version != 1) { + die("unexpected seccomp file version: %x", hdr->version); + } + if (hdr->len_allow_filter > MAX_BPF_SIZE) { + die("allow filter size too big %u", hdr->len_allow_filter); + } + if (hdr->len_allow_filter % sizeof(struct sock_filter) != 0) { + die("allow filter size not multiple of sock_filter"); + } + if (hdr->len_deny_filter > MAX_BPF_SIZE) { + die("deny filter size too big %u", hdr->len_deny_filter); + } + if (hdr->len_deny_filter % sizeof(struct sock_filter) != 0) { + die("deny filter size not multiple of sock_filter"); + } + struct stat stat_buf; + if (fstat(fileno(file), &stat_buf) != 0) { + die("cannot fstat the seccomp file"); + } + off_t expected_size = + sizeof(struct sc_seccomp_file_header) + hdr->len_allow_filter + + hdr->len_deny_filter; + if (stat_buf.st_size != expected_size) { + die("unexpected filesize %ju != %ju", stat_buf.st_size, + expected_size); + } +} + bool sc_apply_seccomp_profile_for_security_tag(const char *security_tag) { debug("loading bpf program for security tag %s", security_tag); char profile_path[PATH_MAX] = { 0 }; - sc_must_snprintf(profile_path, sizeof(profile_path), "%s/%s.bin", + struct sock_fprog SC_CLEANUP(sc_cleanup_sock_fprog) prog_allow = { 0 }; + struct sock_fprog SC_CLEANUP(sc_cleanup_sock_fprog) prog_deny = { 0 }; + sc_must_snprintf(profile_path, sizeof(profile_path), "%s/%s.bin2", filter_profile_dir, security_tag); // Wait some time for the security profile to show up. 
When @@ -151,24 +259,25 @@ // set on the system. validate_bpfpath_is_safe(profile_path); - /* The extra space has dual purpose. First of all, it is required to detect - * feof() while still being able to correctly read MAX_BPF_SIZE bytes of - * seccomp profile. In addition, because we treat the profile as a - * quasi-string and use sc_streq(), to compare it. The extra space is used - * as a way to ensure the result is a terminated string (though in practice - * it can contain embedded NULs any earlier position). Note that - * sc_read_seccomp_filter knows about the extra space and ensures that the - * buffer is never empty. */ - char bpf[MAX_BPF_SIZE + 1] = { 0 }; - size_t num_read = sc_read_seccomp_filter(profile_path, bpf, sizeof bpf); - if (sc_streq(bpf, "@unrestricted\n")) { + // when we stop supporting 14.04 we could just use hdr = {0} + struct sc_seccomp_file_header hdr; + memset(&hdr, 0, sizeof hdr); + FILE *file SC_CLEANUP(sc_cleanup_file) = fopen(profile_path, "rb"); + + sc_must_read_and_validate_header_from_file(file, profile_path, &hdr); + if (hdr.unrestricted & 0x1) { return false; } - struct sock_fprog prog = { - .len = num_read / sizeof(struct sock_filter), - .filter = (struct sock_filter *)bpf, - }; - sc_apply_seccomp_filter(&prog); + // populate allow + sc_must_read_filter_from_file(file, hdr.len_allow_filter, "allow", + &prog_allow); + sc_must_read_filter_from_file(file, hdr.len_deny_filter, "deny", + &prog_deny); + + // apply both filters + sc_apply_seccomp_filter(&prog_deny); + sc_apply_seccomp_filter(&prog_allow); + return true; } diff -Nru snapd-2.62+23.10/cmd/snap-confine/seccomp-support.h snapd-2.63+23.10/cmd/snap-confine/seccomp-support.h --- snapd-2.62+23.10/cmd/snap-confine/seccomp-support.h 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-confine/seccomp-support.h 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,7 @@ * sc_apply_seccomp_profile_for_security_tag applies a seccomp profile to the * current process. The filter is loaded from a pre-compiled bpf bytecode * stored in "/var/lib/snap/seccomp/bpf" using the security tag and the - * extension ".bin". All components along that path must be owned by root and + * extension ".bin2". All components along that path must be owned by root and * cannot be writable by UNIX _other_. * * The security tag is shared with other parts of snapd. diff -Nru snapd-2.62+23.10/cmd/snap-confine/snap-confine.apparmor.in snapd-2.63+23.10/cmd/snap-confine/snap-confine.apparmor.in --- snapd-2.62+23.10/cmd/snap-confine/snap-confine.apparmor.in 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-confine/snap-confine.apparmor.in 2024-04-24 00:00:39.000000000 +0000 @@ -3,7 +3,7 @@ @LIBEXECDIR@/snap-confine (attach_disconnected) { # Include any additional files that snapd chose to generate. - # - for $HOME on NFS + # - for $HOME on remote file system. # - for $HOME on encrypted media # # Those are discussed on https://forum.snapcraft.io/t/snapd-vs-upstream-kernel-vs-apparmor @@ -120,6 +120,9 @@ # To find if apparmor is enabled /sys/module/apparmor/parameters/enabled r, + # For detecting if we're in a container + /run/systemd/container r, + # Don't allow changing profile to unconfined or profiles that start with # '/'. Use 'unsafe' to support snap-exec on armhf and its reliance on # the environment for determining the capabilities of the architecture. 
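The new "/run/systemd/container r," rule above exists so that snap-confine can tell whether it is executing inside a container; further down in this diff, snap-confine.c uses sc_is_in_container() to skip device cgroup setup in that case. As an illustration only (not snapd code, and the helper name below is invented), the same check can be sketched in a few lines of Go, assuming the systemd convention that /run/systemd/container exists only inside containers — which is presumably what the apparmor rule above permits snap-confine to read:

package main

import (
	"fmt"
	"os"
)

// isInContainer reports whether the process appears to run inside a
// container, using the systemd convention that /run/systemd/container
// exists (and names the container manager) only in that case. This is
// an illustrative stand-in for snap-confine's sc_is_in_container().
func isInContainer() bool {
	_, err := os.Stat("/run/systemd/container")
	return err == nil
}

func main() {
	if isInContainer() {
		fmt.Println("device cgroup skipped, executing inside a container")
	} else {
		fmt.Println("setting up device cgroup")
	}
}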
@@ -148,8 +151,11 @@ # deny change_profile unsafe /** -> {unconfined,/**}, # change_profile unsafe /** -> **, - # reading seccomp filters - /{tmp/snap.rootfs_*/,}var/lib/snapd/seccomp/bpf/*.bin r, + # reading seccomp filters. + # Note 1: We still need to consider .bin extension because of global.bin file. + # Note 2: This rule is not needed because of rule '/var/lib/** rw', however we keep it because at + # some point we want to investigate if we can narrow the scope of the aforementioned rule. + /{tmp/snap.rootfs_*/,}var/lib/snapd/seccomp/bpf/*.bin{,2} r, # adding a missing bpf mount mount fstype=bpf options=(rw) bpf -> /sys/fs/bpf/, @@ -288,7 +294,7 @@ # pivot_root mediation in AppArmor is not complete. See LP: #1791711. # However, we can mediate the new_root and put_old to be what we expect, # and then deny directory creation within old_root to prevent trivial - # pivoting into a whitelisted path. + # pivoting into an allowlisted path. pivot_root oldroot=/tmp/snap.rootfs_*/var/lib/snapd/hostfs/ /tmp/snap.rootfs_*/, # Explicitly deny creating the old_root directory in case it is # inadvertently added somewhere else. While this doesn't resolve diff -Nru snapd-2.62+23.10/cmd/snap-confine/snap-confine.c snapd-2.63+23.10/cmd/snap-confine/snap-confine.c --- snapd-2.62+23.10/cmd/snap-confine/snap-confine.c 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-confine/snap-confine.c 2024-04-24 00:00:39.000000000 +0000 @@ -627,8 +627,18 @@ /* max wait time for /var/lib/snapd/cgroup/.devices to appear */ static const size_t DEVICES_FILE_MAX_WAIT = 120; -static bool is_device_cgroup_self_managed(const sc_invocation *inv) +struct sc_device_cgroup_options { + bool self_managed; + bool non_strict; +}; + +static void sc_get_device_cgroup_setup(const sc_invocation *inv, struct sc_device_cgroup_options + *devsetup) { + if (devsetup == NULL) { + die("internal error: devsetup is NULL"); + } + char info_path[PATH_MAX] = { 0 }; sc_must_snprintf(info_path, sizeof info_path, @@ -648,14 +658,22 @@ die("cannot open %s", info_path); } + sc_error *err SC_CLEANUP(sc_cleanup_error) = NULL; char *self_managed_value SC_CLEANUP(sc_cleanup_string) = NULL; - sc_error *err = NULL; if (sc_infofile_get_key (stream, "self-managed", &self_managed_value, &err) < 0) { sc_die_on_error(err); } + rewind(stream); + + char *non_strict_value SC_CLEANUP(sc_cleanup_string) = NULL; + if (sc_infofile_get_key(stream, "non-strict", &non_strict_value, &err) < + 0) { + sc_die_on_error(err); + } - return sc_streq(self_managed_value, "true"); + devsetup->self_managed = sc_streq(self_managed_value, "true"); + devsetup->non_strict = sc_streq(non_strict_value, "true"); } static sc_device_cgroup_mode device_cgroup_mode_for_snap(sc_invocation *inv) @@ -682,6 +700,7 @@ break; } } + return mode; } @@ -712,11 +731,18 @@ // Set up a device cgroup, unless the snap has been allowed to manage the // device cgroup by itself. 
- if (!is_device_cgroup_self_managed(inv)) { + struct sc_device_cgroup_options cgdevopts = { false, false }; + sc_get_device_cgroup_setup(inv, &cgdevopts); + bool in_container = sc_is_in_container(); + if (cgdevopts.self_managed) { + debug("device cgroup is self-managed by the snap"); + } else if (cgdevopts.non_strict) { + debug("device cgroup skipped, snap in non-strict confinement"); + } else if (in_container) { + debug("device cgroup skipped, executing inside a container"); + } else { sc_device_cgroup_mode mode = device_cgroup_mode_for_snap(inv); sc_setup_device_cgroup(inv->security_tag, mode); - } else { - debug("device cgroup is self-managed by the snap"); } /** diff -Nru snapd-2.62+23.10/cmd/snap-failure/cmd_snapd.go snapd-2.63+23.10/cmd/snap-failure/cmd_snapd.go --- snapd-2.62+23.10/cmd/snap-failure/cmd_snapd.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-failure/cmd_snapd.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "encoding/json" "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -64,7 +63,7 @@ func prevRevision(snapName string) (string, error) { seqFile := filepath.Join(dirs.SnapSeqDir, snapName+".json") - content, err := ioutil.ReadFile(seqFile) + content, err := os.ReadFile(seqFile) if os.IsNotExist(err) { return "", errNoSnapd } diff -Nru snapd-2.62+23.10/cmd/snap-fde-keymgr/main.go snapd-2.63+23.10/cmd/snap-fde-keymgr/main.go --- snapd-2.62+23.10/cmd/snap-fde-keymgr/main.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-fde-keymgr/main.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "encoding/json" "fmt" "io" - "io/ioutil" "os" "strings" @@ -125,7 +124,7 @@ } if alreadyExists { // we already have the recovery key, read it back - maybeKey, err := ioutil.ReadFile(c.KeyFile) + maybeKey, err := os.ReadFile(c.KeyFile) if err != nil { return fmt.Errorf("cannot read existing recovery key file: %v", err) } @@ -149,7 +148,7 @@ } } case strings.HasPrefix(authz, "file:"): - authzKey, err := ioutil.ReadFile(authz[len("file:"):]) + authzKey, err := os.ReadFile(authz[len("file:"):]) if err != nil { return fmt.Errorf("cannot load authorization key: %v", err) } @@ -178,7 +177,7 @@ return fmt.Errorf("cannot remove recovery key from LUKS device: %v", err) } case strings.HasPrefix(authz, "file:"): - authzKey, err := ioutil.ReadFile(authz[len("file:"):]) + authzKey, err := os.ReadFile(authz[len("file:"):]) if err != nil { return fmt.Errorf("cannot load authorization key: %v", err) } diff -Nru snapd-2.62+23.10/cmd/snap-recovery-chooser/main_test.go snapd-2.63+23.10/cmd/snap-recovery-chooser/main_test.go --- snapd-2.62+23.10/cmd/snap-recovery-chooser/main_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-recovery-chooser/main_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "encoding/json" "fmt" "io" - "io/ioutil" "log/syslog" "net/http" "net/http/httptest" @@ -127,7 +126,7 @@ _, err := main.RunUI(exec.Command(mockCmd.Exe()), mockSystems) c.Assert(err, IsNil) - data, err := ioutil.ReadFile(tf) + data, err := os.ReadFile(tf) c.Assert(err, IsNil) var input *main.ChooserSystems err = json.Unmarshal(data, &input) @@ -266,7 +265,7 @@ {"tool"}, }) - capturedStdin, err := ioutil.ReadFile(capturedStdinPath) + capturedStdin, err := os.ReadFile(capturedStdinPath) c.Assert(err, IsNil) var stdoutSystems main.ChooserSystems err = json.Unmarshal(capturedStdin, &stdoutSystems) diff -Nru snapd-2.62+23.10/cmd/snap-repair/cmd_done_retry_skip_test.go 
snapd-2.63+23.10/cmd/snap-repair/cmd_done_retry_skip_test.go --- snapd-2.62+23.10/cmd/snap-repair/cmd_done_retry_skip_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-repair/cmd_done_retry_skip_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,7 @@ package main_test import ( - "io/ioutil" + "io" "os" "strconv" "syscall" @@ -74,7 +74,7 @@ err = repair.ParseArgs([]string{s}) c.Check(err, IsNil) - status, err := ioutil.ReadAll(rp) + status, err := io.ReadAll(rp) c.Assert(err, IsNil) c.Check(string(status), Equals, s+"\n") } diff -Nru snapd-2.62+23.10/cmd/snap-repair/runner.go snapd-2.63+23.10/cmd/snap-repair/runner.go --- snapd-2.62+23.10/cmd/snap-repair/runner.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-repair/runner.go 2024-04-24 00:00:39.000000000 +0000 @@ -27,7 +27,6 @@ "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -750,7 +749,7 @@ func findDevInfo16() (*deviceInfo, error) { workBS := asserts.NewMemoryBackstore() assertSeedDir := filepath.Join(dirs.SnapSeedDir, "assertions") - dc, err := ioutil.ReadDir(assertSeedDir) + dc, err := os.ReadDir(assertSeedDir) if err != nil { return nil, err } diff -Nru snapd-2.62+23.10/cmd/snap-repair/runner_test.go snapd-2.63+23.10/cmd/snap-repair/runner_test.go --- snapd-2.62+23.10/cmd/snap-repair/runner_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-repair/runner_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -154,7 +153,7 @@ func checkStateJSON(c *C, file string, exp map[string]interface{}) { stateFile := map[string]interface{}{} - b, err := ioutil.ReadFile(file) + b, err := os.ReadFile(file) c.Assert(err, IsNil) err = json.Unmarshal(b, &stateFile) c.Assert(err, IsNil) @@ -948,7 +947,7 @@ } func (s *runnerSuite) loadSequences(c *C) map[string][]*repair.RepairState { - data, err := ioutil.ReadFile(dirs.SnapRepairStateFile) + data, err := os.ReadFile(dirs.SnapRepairStateFile) c.Assert(err, IsNil) var x struct { Sequences map[string][]*repair.RepairState `json:"sequences"` @@ -1911,7 +1910,7 @@ } func (s *runScriptSuite) verifyRundir(c *C, names []string) { - dirents, err := ioutil.ReadDir(s.runDir) + dirents, err := os.ReadDir(s.runDir) c.Assert(err, IsNil) c.Assert(dirents, HasLen, len(names)) for i := range dirents { diff -Nru snapd-2.62+23.10/cmd/snap-seccomp/export_test.go snapd-2.63+23.10/cmd/snap-seccomp/export_test.go --- snapd-2.62+23.10/cmd/snap-seccomp/export_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-seccomp/export_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -19,11 +19,18 @@ package main +import ( + "os" + + "github.com/snapcore/snapd/testutil" +) + var ( Compile = compile SeccompResolver = seccompResolver VersionInfo = versionInfo GoSeccompFeatures = goSeccompFeatures + ExportBPF = exportBPF ) func MockArchDpkgArchitecture(f func() string) (restore func()) { @@ -65,3 +72,9 @@ seccompSyscalls = old } } + +func MockOsCreateTemp(f func(dir, pattern string) (*os.File, error)) (restore func()) { + restore = testutil.Backup(&osCreateTemp) + osCreateTemp = f + return restore +} diff -Nru snapd-2.62+23.10/cmd/snap-seccomp/main.go snapd-2.63+23.10/cmd/snap-seccomp/main.go --- snapd-2.62+23.10/cmd/snap-seccomp/main.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-seccomp/main.go 2024-04-24 00:00:39.000000000 +0000 @@ -210,14 +210,15 @@ import ( "bufio" "bytes" + "encoding/binary" "fmt" - "io/ioutil" + 
"io" "os" "strconv" "strings" "syscall" - "github.com/seccomp/libseccomp-golang" + seccomp "github.com/seccomp/libseccomp-golang" "github.com/snapcore/snapd/arch" "github.com/snapcore/snapd/osutil" @@ -583,11 +584,12 @@ errnoOnImplicitDenial int16 = C.EPERM ) -func parseLine(line string, secFilter *seccomp.ScmpFilter) error { +func parseLine(line string, secFilterAllow, secFilterDeny *seccomp.ScmpFilter) error { // ignore comments and empty lines if strings.HasPrefix(line, "#") || line == "" { return nil } + secFilter := secFilterAllow // regular line tokens := strings.Fields(line) @@ -604,6 +606,7 @@ if strings.HasPrefix(syscallName, "~") { action = seccomp.ActErrno.SetReturnCode(errnoOnExplicitDenial) syscallName = syscallName[1:] + secFilter = secFilterDeny } secSyscall, err := seccomp.GetSyscallFromName(syscallName) @@ -691,8 +694,11 @@ if err = secFilter.AddRuleConditionalExact(secSyscall, action, conds); err != nil { err = secFilter.AddRuleConditional(secSyscall, action, conds) } + if err != nil { + return fmt.Errorf("cannot add rule for line %q: %v", line, err) + } - return err + return nil } // used to mock in tests @@ -789,18 +795,116 @@ return seccomp.ActAllow } +var osCreateTemp = os.CreateTemp + +func exportBPF(fout *os.File, filter *seccomp.ScmpFilter) (bpfLen int64, err error) { + // TODO: use a common way to handle prefixed errors across snapd + errPrefixFmt := "cannot export bpf filter: %w" + + oldPos, err := fout.Seek(0, io.SeekCurrent) + if err != nil { + return 0, fmt.Errorf(errPrefixFmt, err) + } + if err := filter.ExportBPF(fout); err != nil { + return 0, fmt.Errorf(errPrefixFmt, err) + } + nowPos, err := fout.Seek(0, io.SeekCurrent) + if err != nil { + return 0, fmt.Errorf(errPrefixFmt, err) + } + + return nowPos - oldPos, nil +} + +// New .bin2 seccomp files are composed by the following header, and potentially one +// allow filter and/or one deny filter (if lenAllowFilter and lenDenyFilter are greater +// than 0 respectively). When more than one filter is loaded, the kernel applies +// the most restrictive action, thus any explicit deny will take precedence. 
+// This struct needs to be in sync with seccomp-support.c +type scSeccompFileHeader struct { + header [2]byte + version byte + // flags + unrestricted byte + // unused + padding [4]byte + // location of allow/deny, all offsets/len in bytes + lenAllowFilter uint32 + lenDenyFilter uint32 + // reserved for future use + reserved2 [112]byte +} + +func writeUnrestrictedFilter(outFile string) error { + hdr := scSeccompFileHeader{ + header: [2]byte{'S', 'C'}, + version: 0x1, + // tell snap-confine + unrestricted: 0x1, + } + fout, err := osutil.NewAtomicFile(outFile, 0644, 0, osutil.NoChown, osutil.NoChown) + if err != nil { + return err + } + defer fout.Cancel() + + if err := binary.Write(fout, arch.Endian(), hdr); err != nil { + return err + } + return fout.Commit() +} + +func writeSeccompFilter(outFile string, filterAllow, filterDeny *seccomp.ScmpFilter) error { + fout, err := osutil.NewAtomicFile(outFile, 0644, 0, osutil.NoChown, osutil.NoChown) + if err != nil { + return err + } + defer fout.Cancel() + + // Write preliminary header because we don't know the sizes of the + // seccomp filters yet and the only way to know is to export to + // a file (until seccomp_export_bpf_mem() becomes available) + hdr := scSeccompFileHeader{ + header: [2]byte{'S', 'C'}, + version: 0x1, + } + if err := binary.Write(fout, arch.Endian(), hdr); err != nil { + return err + } + allowSize, err := exportBPF(fout.File, filterAllow) + if err != nil { + return err + } + denySize, err := exportBPF(fout.File, filterDeny) + if err != nil { + return err + } + + // now write final header + hdr.lenAllowFilter = uint32(allowSize) + hdr.lenDenyFilter = uint32(denySize) + if _, err := fout.Seek(0, io.SeekStart); err != nil { + return err + } + if err := binary.Write(fout, arch.Endian(), hdr); err != nil { + return err + } + + return fout.Commit() +} + func compile(content []byte, out string) error { var err error - var secFilter *seccomp.ScmpFilter + var secFilterAllow, secFilterDeny *seccomp.ScmpFilter unrestricted, complain := preprocess(content) switch { case unrestricted: - return osutil.AtomicWrite(out, bytes.NewBufferString("@unrestricted\n"), 0644, 0) + return writeUnrestrictedFilter(out) case complain: var complainAct seccomp.ScmpAction = complainAction() - secFilter, err = seccomp.NewFilter(complainAct) + secFilterAllow, err = seccomp.NewFilter(complainAct) if err != nil { if complainAct != seccomp.ActAllow { // ActLog is only supported in newer versions @@ -808,9 +912,16 @@ // libseccomp-golang. Attempt to fall back to // ActAllow before erroring out. 
complainAct = seccomp.ActAllow - secFilter, err = seccomp.NewFilter(complainAct) + secFilterAllow, err = seccomp.NewFilter(complainAct) } } + if err != nil { + return fmt.Errorf("cannot create allow seccomp filter: %s", err) + } + secFilterDeny, err = seccomp.NewFilter(complainAct) + if err != nil { + return fmt.Errorf("cannot create deny seccomp filter: %s", err) + } // Set unrestricted to 'true' to fallback to the pre-ActLog // behavior of simply setting the allow filter without adding @@ -819,19 +930,26 @@ unrestricted = true } default: - secFilter, err = seccomp.NewFilter(seccomp.ActErrno.SetReturnCode(errnoOnImplicitDenial)) + secFilterAllow, err = seccomp.NewFilter(seccomp.ActErrno.SetReturnCode(errnoOnImplicitDenial)) + if err != nil { + return fmt.Errorf("cannot create seccomp filter: %s", err) + } + secFilterDeny, err = seccomp.NewFilter(seccomp.ActAllow) + if err != nil { + return fmt.Errorf("cannot create seccomp filter: %s", err) + } } - if err != nil { - return fmt.Errorf("cannot create seccomp filter: %s", err) + if err := addSecondaryArches(secFilterAllow); err != nil { + return err } - if err := addSecondaryArches(secFilter); err != nil { + if err := addSecondaryArches(secFilterDeny); err != nil { return err } if !unrestricted { scanner := bufio.NewScanner(bytes.NewBuffer(content)) for scanner.Scan() { - if err := parseLine(scanner.Text(), secFilter); err != nil { + if err := parseLine(scanner.Text(), secFilterAllow, secFilterDeny); err != nil { return fmt.Errorf("cannot parse line: %s", err) } } @@ -841,21 +959,14 @@ } if osutil.GetenvBool("SNAP_SECCOMP_DEBUG") { - secFilter.ExportPFC(os.Stdout) - } - - // write atomically - fout, err := osutil.NewAtomicFile(out, 0644, 0, osutil.NoChown, osutil.NoChown) - if err != nil { - return err + secFilterAllow.ExportPFC(os.Stdout) + secFilterDeny.ExportPFC(os.Stdout) } - // Cancel once Committed is a NOP - defer fout.Cancel() - if err := secFilter.ExportBPF(fout.File); err != nil { + if err := writeSeccompFilter(out, secFilterAllow, secFilterDeny); err != nil { return err } - return fout.Commit() + return nil } // caches for uid and gid lookups @@ -914,7 +1025,7 @@ fmt.Println("compile needs an input and output file") os.Exit(1) } - content, err = ioutil.ReadFile(os.Args[2]) + content, err = os.ReadFile(os.Args[2]) if err != nil { break } diff -Nru snapd-2.62+23.10/cmd/snap-seccomp/main_test.go snapd-2.63+23.10/cmd/snap-seccomp/main_test.go --- snapd-2.62+23.10/cmd/snap-seccomp/main_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-seccomp/main_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -36,7 +36,6 @@ main "github.com/snapcore/snapd/cmd/snap-seccomp" "github.com/snapcore/snapd/osutil" "github.com/snapcore/snapd/release" - "github.com/snapcore/snapd/testutil" ) // Hook up check.v1 into the "go test" runner @@ -52,6 +51,7 @@ const ( Deny = iota + DenyExplicit Allow ) @@ -70,31 +70,57 @@ #define MAX_BPF_SIZE 32 * 1024 +// keep in sync with: +// cmd/snap-confine/seccomp-support.c +// main.go:scSeccompFileHeader +struct sc_seccomp_file_header { + char header[2]; + char version; + char unrestricted; + char padding[4]; + uint32_t len_allow_filter; + uint32_t len_deny_filter; + char reserved2[112]; +}; + int sc_apply_seccomp_bpf(const char* profile_path) { - unsigned char bpf[MAX_BPF_SIZE + 1]; // account for EOF + struct sc_seccomp_file_header hdr = {{0}, 0}; + unsigned char bpf_allow[MAX_BPF_SIZE + 1]; // account for EOF + unsigned char bpf_deny[MAX_BPF_SIZE + 1]; // account for EOF FILE* fp; + fp = 
fopen(profile_path, "rb"); if (fp == NULL) { fprintf(stderr, "cannot read %s\n", profile_path); return -1; } - - // set 'size' to 1; to get bytes transferred - size_t num_read = fread(bpf, 1, sizeof(bpf), fp); - + fread(&hdr, 1, sizeof(struct sc_seccomp_file_header), fp); + if (ferror(fp) != 0) { + perror("fread() header"); + return -1; + } + fread(bpf_allow, 1, hdr.len_allow_filter, fp); if (ferror(fp) != 0) { perror("fread()"); return -1; - } else if (feof(fp) == 0) { - fprintf(stderr, "file too big\n"); + } + fread(bpf_deny, 1, hdr.len_deny_filter, fp); + if (ferror(fp) != 0) { + perror("fread()"); return -1; } + fclose(fp); - struct sock_fprog prog = { - .len = num_read / sizeof(struct sock_filter), - .filter = (struct sock_filter*)bpf, + struct sock_fprog prog_allow = { + .len = hdr.len_allow_filter / sizeof(struct sock_filter), + .filter = (struct sock_filter*)bpf_allow, + }; + + struct sock_fprog prog_deny = { + .len = hdr.len_deny_filter / sizeof(struct sock_filter), + .filter = (struct sock_filter*)bpf_deny, }; // Set NNP to allow loading seccomp policy into the kernel without @@ -104,10 +130,15 @@ return -1; } - if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) { - perror("prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, ...) failed"); + if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_deny)) { + perror("prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, ...) deny failed"); return -1; } + if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_allow)) { + perror("prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, ...) allow failed"); + return -1; + } + return 0; } @@ -152,9 +183,12 @@ syscall_ret = syscall(l[0], l[1], l[2], l[3], l[4], l[5], l[6]); // 911 is our mocked errno for implicit denials via unlisted syscalls and // 999 is explicit denial - if (syscall_ret < 0 && (errno == 911 || errno == 999)) { + if (syscall_ret < 0 && errno == 911) { ret = 10; } + if (syscall_ret < 0 && errno == 999) { + ret = 20; + } syscall(SYS_exit, ret, 0, 0, 0, 0, 0); return 0; } @@ -223,7 +257,7 @@ // posix_fadvise, pread64, pwrite64, readahead, sync_file_range, and truncate64. // // Once we start using those. See `man syscall` -func (s *snapSeccompSuite) runBpf(c *C, seccompWhitelist, bpfInput string, expected int) { +func (s *snapSeccompSuite) runBpf(c *C, seccompAllowlist, bpfInput string, expected int) { // Common syscalls we need to allow for a minimal statically linked // c program. // @@ -253,7 +287,7 @@ mprotect ` bpfPath := filepath.Join(c.MkDir(), "bpf") - err := main.Compile([]byte(common+seccompWhitelist), bpfPath) + err := main.Compile([]byte(common+seccompAllowlist), bpfPath) c.Assert(err, IsNil) // default syscall runner @@ -304,7 +338,7 @@ case syscallNr == -10165: // "mknod" on arm64 is not available at all on arm64 // only "mknodat" but libseccomp will not generate a - // "mknodat" whitelist, it geneates a whitelist with + // "mknodat" allowlist, it geneates a allowlist with // syscall -10165 (!?!) so we cannot test this. c.Skip("skipping mknod tests on arm64") case syscallNr < 0: @@ -341,15 +375,25 @@ // else is unexpected (segv, strtoll failure, ...) 
exitCode, e := osutil.ExitCode(err) c.Assert(e, IsNil) - c.Assert(exitCode == 0 || exitCode == 10, Equals, true, Commentf("unexpected exit code: %v for %v - test setup broken", exitCode, seccompWhitelist)) + c.Assert(exitCode == 0 || exitCode == 10 || exitCode == 20, Equals, true, Commentf("unexpected exit code: %v for %v - test setup broken", exitCode, seccompAllowlist)) switch expected { case Allow: if err != nil { - c.Fatalf("unexpected error for %q (failed to run %q)", seccompWhitelist, err) + c.Fatalf("unexpected error for %q (failed to run %q)", seccompAllowlist, err) } case Deny: + if exitCode != 10 { + c.Fatalf("unexpected exit code for %q %q (%v != %v)", seccompAllowlist, bpfInput, exitCode, 10) + } if err == nil { - c.Fatalf("unexpected success for %q %q (ran but should have failed)", seccompWhitelist, bpfInput) + c.Fatalf("unexpected success for %q %q (ran but should have failed)", seccompAllowlist, bpfInput) + } + case DenyExplicit: + if exitCode != 20 { + c.Fatalf("unexpected exit code for %q %q (%v != %v)", seccompAllowlist, bpfInput, exitCode, 20) + } + if err == nil { + c.Fatalf("unexpected success for %q %q (ran but should have failed)", seccompAllowlist, bpfInput) } default: c.Fatalf("unknown expected result %v", expected) @@ -362,10 +406,13 @@ err := main.Compile([]byte(inp), outPath) c.Assert(err, IsNil) - c.Check(outPath, testutil.FileEquals, inp) + expected := [128]byte{'S', 'C', 0x1, 0x1} + fileContent, err := os.ReadFile(outPath) + c.Assert(err, IsNil) + c.Check(fileContent, DeepEquals, expected[:]) } -// TestCompile iterates over a range of textual seccomp whitelist rules and +// TestCompile iterates over a range of textual seccomp allowlist rules and // mocked kernel syscall input. For each rule, the test consists of compiling // the rule into a bpf program and then running that program on a virtual bpf // machine and comparing the bpf machine output to the specified expected @@ -383,7 +430,7 @@ func (s *snapSeccompSuite) TestCompile(c *C) { for _, t := range []struct { - seccompWhitelist string + seccompAllowlist string bpfInput string expected int }{ @@ -394,8 +441,9 @@ {"read", "read", Allow}, {"read\nwrite\nexecve\n", "write", Allow}, - // trivial denial - {"read", "ioctl", Deny}, + // trivial denial (uses write in allow-list to ensure any + // errors printing is visible) + {"write", "ioctl", Deny}, // test argument filtering syntax, we currently support: // >=, <=, !, <, >, | @@ -450,12 +498,12 @@ {"ioctl - TIOCSTI", "ioctl;native;-,TIOCSTI", Allow}, {"ioctl - TIOCSTI", "ioctl;native;-,99", Deny}, {"ioctl - !TIOCSTI", "ioctl;native;-,TIOCSTI", Deny}, - {"~ioctl - TIOCSTI", "ioctl;native;-,TIOCSTI", Deny}, + {"ioctl\n~ioctl - TIOCSTI", "ioctl;native;-,TIOCSTI", DenyExplicit}, // also check we can deny multiple uses of ioctl but still allow // others - {"~ioctl - TIOCSTI\n~ioctl - TIOCLINUX\nioctl - !TIOCSTI", "ioctl;native;-,TIOCSTI", Deny}, - {"~ioctl - TIOCSTI\n~ioctl - TIOCLINUX\nioctl - !TIOCSTI", "ioctl;native;-,TIOCLINUX", Deny}, - {"~ioctl - TIOCSTI\n~ioctl - TIOCLINUX\nioctl - !TIOCSTI", "ioctl;native;-,TIOCGWINSZ", Allow}, + {"ioctl\n~ioctl - TIOCSTI\n~ioctl - TIOCLINUX\nioctl - !TIOCSTI", "ioctl;native;-,TIOCSTI", DenyExplicit}, + {"ioctl\n~ioctl - TIOCSTI\n~ioctl - TIOCLINUX\nioctl - !TIOCSTI", "ioctl;native;-,TIOCLINUX", DenyExplicit}, + {"ioctl\n~ioctl - TIOCSTI\n~ioctl - TIOCLINUX\nioctl - !TIOCSTI", "ioctl;native;-,TIOCGWINSZ", Allow}, // test_bad_seccomp_filter_args_clone {"setns - CLONE_NEWNET", "setns;native;-,99", Deny}, @@ -472,6 +520,13 @@ 
// test_bad_seccomp_filter_args_prio {"setpriority PRIO_PROCESS 0 >=0", "setpriority;native;PRIO_PROCESS,0,19", Allow}, {"setpriority PRIO_PROCESS 0 >=0", "setpriority;native;99", Deny}, + // negative filtering + {"setpriority\n~setpriority PRIO_PROCESS 0 >=0", "setpriority;native;PRIO_PROCESS,0,10", DenyExplicit}, + // mix negative/positive filtering + // allow setpriority >= 5 but explicitly deny >=10 + {"setpriority PRIO_PROCESS 0 >=5\n~setpriority PRIO_PROCESS 0 >=10", "setpriority;native;PRIO_PROCESS,0,2", Deny}, + {"setpriority PRIO_PROCESS 0 >=5\n~setpriority PRIO_PROCESS 0 >=10", "setpriority;native;PRIO_PROCESS,0,5", Allow}, + {"setpriority PRIO_PROCESS 0 >=5\n~setpriority PRIO_PROCESS 0 >=10", "setpriority;native;PRIO_PROCESS,0,10", DenyExplicit}, // test_bad_seccomp_filter_args_quotactl {"quotactl Q_GETQUOTA", "quotactl;native;Q_GETQUOTA", Allow}, @@ -495,7 +550,7 @@ {"chown - -1 -1", "chown;native;-,-1,-1", Allow}, {"chown - -1 -1", "chown;native;-,99,-1", Deny}, } { - s.runBpf(c, t.seccompWhitelist, t.bpfInput, t.expected) + s.runBpf(c, t.seccompAllowlist, t.bpfInput, t.expected) } } @@ -510,7 +565,7 @@ } for _, t := range []struct { - seccompWhitelist string + seccompAllowlist string bpfInput string expected int }{ @@ -523,7 +578,7 @@ {"socket AF_CONN", "socket;native;AF_CONN", Allow}, {"socket AF_CONN", "socket;native;99", Deny}, } { - s.runBpf(c, t.seccompWhitelist, t.bpfInput, t.expected) + s.runBpf(c, t.seccompAllowlist, t.bpfInput, t.expected) } } @@ -633,29 +688,29 @@ for _, pre := range []string{"AF", "PF"} { for _, i := range []string{"UNIX", "LOCAL", "INET", "INET6", "IPX", "NETLINK", "X25", "AX25", "ATMPVC", "APPLETALK", "PACKET", "ALG", "CAN", "BRIDGE", "NETROM", "ROSE", "NETBEUI", "SECURITY", "KEY", "ASH", "ECONET", "SNA", "IRDA", "PPPOX", "WANPIPE", "BLUETOOTH", "RDS", "LLC", "TIPC", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF", "NFC", "VSOCK", "MPLS", "IB"} { - seccompWhitelist := fmt.Sprintf("socket %s_%s", pre, i) + seccompAllowlist := fmt.Sprintf("socket %s_%s", pre, i) bpfInputGood := fmt.Sprintf("socket;native;%s_%s", pre, i) bpfInputBad := "socket;native;99999" - s.runBpf(c, seccompWhitelist, bpfInputGood, Allow) - s.runBpf(c, seccompWhitelist, bpfInputBad, Deny) + s.runBpf(c, seccompAllowlist, bpfInputGood, Allow) + s.runBpf(c, seccompAllowlist, bpfInputBad, Deny) for _, j := range []string{"SOCK_STREAM", "SOCK_DGRAM", "SOCK_SEQPACKET", "SOCK_RAW", "SOCK_RDM", "SOCK_PACKET"} { - seccompWhitelist := fmt.Sprintf("socket %s_%s %s", pre, i, j) + seccompAllowlist := fmt.Sprintf("socket %s_%s %s", pre, i, j) bpfInputGood := fmt.Sprintf("socket;native;%s_%s,%s", pre, i, j) bpfInputBad := fmt.Sprintf("socket;native;%s_%s,9999", pre, i) - s.runBpf(c, seccompWhitelist, bpfInputGood, Allow) - s.runBpf(c, seccompWhitelist, bpfInputBad, Deny) + s.runBpf(c, seccompAllowlist, bpfInputGood, Allow) + s.runBpf(c, seccompAllowlist, bpfInputBad, Deny) } } } for _, i := range []string{"NETLINK_ROUTE", "NETLINK_USERSOCK", "NETLINK_FIREWALL", "NETLINK_SOCK_DIAG", "NETLINK_NFLOG", "NETLINK_XFRM", "NETLINK_SELINUX", "NETLINK_ISCSI", "NETLINK_AUDIT", "NETLINK_FIB_LOOKUP", "NETLINK_CONNECTOR", "NETLINK_NETFILTER", "NETLINK_IP6_FW", "NETLINK_DNRTMSG", "NETLINK_KOBJECT_UEVENT", "NETLINK_GENERIC", "NETLINK_SCSITRANSPORT", "NETLINK_ECRYPTFS", "NETLINK_RDMA", "NETLINK_CRYPTO", "NETLINK_INET_DIAG"} { for _, j := range []string{"AF_NETLINK", "PF_NETLINK"} { - seccompWhitelist := fmt.Sprintf("socket %s - %s", j, i) + seccompAllowlist := fmt.Sprintf("socket %s - %s", j, i)
bpfInputGood := fmt.Sprintf("socket;native;%s,0,%s", j, i) bpfInputBad := fmt.Sprintf("socket;native;%s,0,99", j) - s.runBpf(c, seccompWhitelist, bpfInputGood, Allow) - s.runBpf(c, seccompWhitelist, bpfInputBad, Deny) + s.runBpf(c, seccompAllowlist, bpfInputGood, Allow) + s.runBpf(c, seccompAllowlist, bpfInputBad, Deny) } } } @@ -664,12 +719,12 @@ func (s *snapSeccompSuite) TestRestrictionsWorkingArgsQuotactl(c *C) { for _, arg := range []string{"Q_QUOTAON", "Q_QUOTAOFF", "Q_GETQUOTA", "Q_SETQUOTA", "Q_GETINFO", "Q_SETINFO", "Q_GETFMT", "Q_SYNC", "Q_XQUOTAON", "Q_XQUOTAOFF", "Q_XGETQUOTA", "Q_XSETQLIM", "Q_XGETQSTAT", "Q_XQUOTARM"} { // good input - seccompWhitelist := fmt.Sprintf("quotactl %s", arg) + seccompAllowlist := fmt.Sprintf("quotactl %s", arg) bpfInputGood := fmt.Sprintf("quotactl;native;%s", arg) - s.runBpf(c, seccompWhitelist, bpfInputGood, Allow) + s.runBpf(c, seccompAllowlist, bpfInputGood, Allow) // bad input for _, bad := range []string{"quotactl;native;99999", "read;native;"} { - s.runBpf(c, seccompWhitelist, bad, Deny) + s.runBpf(c, seccompAllowlist, bad, Deny) } } } @@ -678,24 +733,24 @@ func (s *snapSeccompSuite) TestRestrictionsWorkingArgsPrctl(c *C) { for _, arg := range []string{"PR_CAP_AMBIENT", "PR_CAP_AMBIENT_RAISE", "PR_CAP_AMBIENT_LOWER", "PR_CAP_AMBIENT_IS_SET", "PR_CAP_AMBIENT_CLEAR_ALL", "PR_CAPBSET_READ", "PR_CAPBSET_DROP", "PR_SET_CHILD_SUBREAPER", "PR_GET_CHILD_SUBREAPER", "PR_SET_DUMPABLE", "PR_GET_DUMPABLE", "PR_GET_ENDIAN", "PR_SET_FPEMU", "PR_GET_FPEMU", "PR_SET_FPEXC", "PR_GET_FPEXC", "PR_SET_KEEPCAPS", "PR_GET_KEEPCAPS", "PR_MCE_KILL", "PR_MCE_KILL_GET", "PR_SET_MM", "PR_SET_MM_START_CODE", "PR_SET_MM_END_CODE", "PR_SET_MM_START_DATA", "PR_SET_MM_END_DATA", "PR_SET_MM_START_STACK", "PR_SET_MM_START_BRK", "PR_SET_MM_BRK", "PR_SET_MM_ARG_START", "PR_SET_MM_ARG_END", "PR_SET_MM_ENV_START", "PR_SET_MM_ENV_END", "PR_SET_MM_AUXV", "PR_SET_MM_EXE_FILE", "PR_MPX_ENABLE_MANAGEMENT", "PR_MPX_DISABLE_MANAGEMENT", "PR_SET_NAME", "PR_GET_NAME", "PR_SET_NO_NEW_PRIVS", "PR_GET_NO_NEW_PRIVS", "PR_SET_PDEATHSIG", "PR_GET_PDEATHSIG", "PR_SET_PTRACER", "PR_SET_SECCOMP", "PR_GET_SECCOMP", "PR_SET_SECUREBITS", "PR_GET_SECUREBITS", "PR_SET_THP_DISABLE", "PR_TASK_PERF_EVENTS_DISABLE", "PR_TASK_PERF_EVENTS_ENABLE", "PR_GET_THP_DISABLE", "PR_GET_TID_ADDRESS", "PR_SET_TIMERSLACK", "PR_GET_TIMERSLACK", "PR_SET_TIMING", "PR_GET_TIMING", "PR_SET_TSC", "PR_GET_TSC", "PR_SET_UNALIGN", "PR_GET_UNALIGN"} { // good input - seccompWhitelist := fmt.Sprintf("prctl %s", arg) + seccompAllowlist := fmt.Sprintf("prctl %s", arg) bpfInputGood := fmt.Sprintf("prctl;native;%s", arg) - s.runBpf(c, seccompWhitelist, bpfInputGood, Allow) + s.runBpf(c, seccompAllowlist, bpfInputGood, Allow) // bad input for _, bad := range []string{"prctl;native;99999", "setpriority;native;"} { - s.runBpf(c, seccompWhitelist, bad, Deny) + s.runBpf(c, seccompAllowlist, bad, Deny) } if arg == "PR_CAP_AMBIENT" { for _, j := range []string{"PR_CAP_AMBIENT_RAISE", "PR_CAP_AMBIENT_LOWER", "PR_CAP_AMBIENT_IS_SET", "PR_CAP_AMBIENT_CLEAR_ALL"} { - seccompWhitelist := fmt.Sprintf("prctl %s %s", arg, j) + seccompAllowlist := fmt.Sprintf("prctl %s %s", arg, j) bpfInputGood := fmt.Sprintf("prctl;native;%s,%s", arg, j) - s.runBpf(c, seccompWhitelist, bpfInputGood, Allow) + s.runBpf(c, seccompAllowlist, bpfInputGood, Allow) for _, bad := range []string{ fmt.Sprintf("prctl;native;%s,99999", arg), "setpriority;native;", } { - s.runBpf(c, seccompWhitelist, bad, Deny) + s.runBpf(c, seccompAllowlist, bad, Deny) } } } @@ -705,7 +760,7 
@@ // ported from test_restrictions_working_args_clone func (s *snapSeccompSuite) TestRestrictionsWorkingArgsClone(c *C) { for _, t := range []struct { - seccompWhitelist string + seccompAllowlist string bpfInput string expected int }{ @@ -724,14 +779,14 @@ {"setns - CLONE_NEWUSER", "setns;native;-,99", Deny}, {"setns - CLONE_NEWUTS", "setns;native;-,99", Deny}, } { - s.runBpf(c, t.seccompWhitelist, t.bpfInput, t.expected) + s.runBpf(c, t.seccompAllowlist, t.bpfInput, t.expected) } } // ported from test_restrictions_working_args_mknod func (s *snapSeccompSuite) TestRestrictionsWorkingArgsMknod(c *C) { for _, t := range []struct { - seccompWhitelist string + seccompAllowlist string bpfInput string expected int }{ @@ -748,14 +803,14 @@ {"mknod - S_IFIFO", "mknod;native;-,999", Deny}, {"mknod - S_IFSOCK", "mknod;native;-,999", Deny}, } { - s.runBpf(c, t.seccompWhitelist, t.bpfInput, t.expected) + s.runBpf(c, t.seccompAllowlist, t.bpfInput, t.expected) } } // ported from test_restrictions_working_args_prio func (s *snapSeccompSuite) TestRestrictionsWorkingArgsPrio(c *C) { for _, t := range []struct { - seccompWhitelist string + seccompAllowlist string bpfInput string expected int }{ @@ -768,14 +823,14 @@ {"setpriority PRIO_PGRP", "setpriority;native;99", Deny}, {"setpriority PRIO_USER", "setpriority;native;99", Deny}, } { - s.runBpf(c, t.seccompWhitelist, t.bpfInput, t.expected) + s.runBpf(c, t.seccompAllowlist, t.bpfInput, t.expected) } } // ported from test_restrictions_working_args_termios func (s *snapSeccompSuite) TestRestrictionsWorkingArgsTermios(c *C) { for _, t := range []struct { - seccompWhitelist string + seccompAllowlist string bpfInput string expected int }{ @@ -784,7 +839,7 @@ // bad input {"ioctl - TIOCSTI", "quotactl;native;-,99", Deny}, } { - s.runBpf(c, t.seccompWhitelist, t.bpfInput, t.expected) + s.runBpf(c, t.seccompAllowlist, t.bpfInput, t.expected) } } @@ -798,7 +853,7 @@ } for _, t := range []struct { - seccompWhitelist string + seccompAllowlist string bpfInput string expected int }{ @@ -814,7 +869,7 @@ {"setgid g:root", "setgid;native;99", Deny}, {"setgid g:daemon", "setgid;native;99", Deny}, } { - s.runBpf(c, t.seccompWhitelist, t.bpfInput, t.expected) + s.runBpf(c, t.seccompAllowlist, t.bpfInput, t.expected) } } @@ -824,7 +879,7 @@ } for _, t := range []struct { arch string - seccompWhitelist string + seccompAllowlist string bpfInput string expected int }{ @@ -851,7 +906,16 @@ // here because on endian mismatch the arch will *not* be // added if arch.DpkgArchitecture() == t.arch { - s.runBpf(c, t.seccompWhitelist, t.bpfInput, t.expected) + s.runBpf(c, t.seccompAllowlist, t.bpfInput, t.expected) } } } + +func (s *snapSeccompSuite) TestExportBpfErrors(c *C) { + fout, err := os.Create(filepath.Join(c.MkDir(), "filter")) + c.Assert(err, IsNil) + + // invalid filter + _, err = main.ExportBPF(fout, &seccomp.ScmpFilter{}) + c.Check(err, ErrorMatches, "cannot export bpf filter: filter is invalid or uninitialized") +} diff -Nru snapd-2.62+23.10/cmd/snap-update-ns/change_test.go snapd-2.63+23.10/cmd/snap-update-ns/change_test.go --- snapd-2.62+23.10/cmd/snap-update-ns/change_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-update-ns/change_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "errors" - "os" + "io/fs" "path/filepath" "syscall" @@ -776,7 +776,7 @@ // error, read only filesystem, create a mimic {C: `lstat "/rofs" `, R: syscall.Stat_t{Uid: 0, Gid: 0, Mode: 0755}}, - {C: `readdir "/rofs"`, R: []os.FileInfo(nil)}, + {C: 
`readdir "/rofs"`, R: []fs.DirEntry(nil)}, {C: `lstat "/tmp/.snap/rofs"`, E: syscall.ENOENT}, {C: `open "/" O_NOFOLLOW|O_CLOEXEC|O_DIRECTORY 0`, R: 3}, {C: `mkdirat 3 "tmp" 0755`}, @@ -911,7 +911,7 @@ // error, read only filesystem, create a mimic {C: `lstat "/rofs" `, R: syscall.Stat_t{Uid: 0, Gid: 0, Mode: 0755}}, - {C: `readdir "/rofs"`, R: []os.FileInfo(nil)}, + {C: `readdir "/rofs"`, R: []fs.DirEntry(nil)}, {C: `lstat "/tmp/.snap/rofs"`, E: syscall.ENOENT}, {C: `open "/" O_NOFOLLOW|O_CLOEXEC|O_DIRECTORY 0`, R: 3}, {C: `mkdirat 3 "tmp" 0755`}, @@ -1287,7 +1287,7 @@ // error, read only filesystem, create a mimic {C: `lstat "/rofs" `, R: syscall.Stat_t{Uid: 0, Gid: 0, Mode: 0755}}, - {C: `readdir "/rofs"`, R: []os.FileInfo(nil)}, + {C: `readdir "/rofs"`, R: []fs.DirEntry(nil)}, {C: `lstat "/tmp/.snap/rofs"`, E: syscall.ENOENT}, {C: `open "/" O_NOFOLLOW|O_CLOEXEC|O_DIRECTORY 0`, R: 3}, {C: `mkdirat 3 "tmp" 0755`}, @@ -1437,7 +1437,7 @@ // error /rofs is a read-only filesystem, create a mimic {C: `lstat "/rofs" `, R: syscall.Stat_t{Mode: 0755}}, - {C: `readdir "/rofs"`, R: []os.FileInfo(nil)}, + {C: `readdir "/rofs"`, R: []fs.DirEntry(nil)}, {C: `lstat "/tmp/.snap/rofs"`, E: syscall.ENOENT}, {C: `open "/" O_NOFOLLOW|O_CLOEXEC|O_DIRECTORY 0`, R: 3}, {C: `mkdirat 3 "tmp" 0755`}, @@ -1828,7 +1828,7 @@ // error, read only filesystem, create a mimic {C: `lstat "/rofs" `, R: syscall.Stat_t{Mode: 0755}}, - {C: `readdir "/rofs"`, R: []os.FileInfo(nil)}, + {C: `readdir "/rofs"`, R: []fs.DirEntry(nil)}, {C: `lstat "/tmp/.snap/rofs"`, E: syscall.ENOENT}, {C: `open "/" O_NOFOLLOW|O_CLOEXEC|O_DIRECTORY 0`, R: 3}, {C: `mkdirat 3 "tmp" 0755`}, @@ -2223,7 +2223,7 @@ // error, read only filesystem, create a mimic {C: `lstat "/rofs" `, R: syscall.Stat_t{Mode: 0755}}, - {C: `readdir "/rofs"`, R: []os.FileInfo(nil)}, + {C: `readdir "/rofs"`, R: []fs.DirEntry(nil)}, {C: `lstat "/tmp/.snap/rofs"`, E: syscall.ENOENT}, {C: `open "/" O_NOFOLLOW|O_CLOEXEC|O_DIRECTORY 0`, R: 3}, {C: `mkdirat 3 "tmp" 0755`}, @@ -2368,12 +2368,14 @@ syscall.Statfs_t{Type: update.TmpfsMagic}) s.sys.InsertFstatResult(`fstat 4 `, syscall.Stat_t{}) s.sys.InsertSysLstatResult(`lstat "/etc" `, syscall.Stat_t{Mode: 0755}) - otherConf := testutil.FakeFileInfo("other.conf", 0755) - s.sys.InsertReadDirResult(`readdir "/etc"`, []os.FileInfo{otherConf}) + otherConf := testutil.FakeDirEntry("other.conf", 0755) + s.sys.InsertReadDirResult(`readdir "/etc"`, []fs.DirEntry{otherConf}) s.sys.InsertFault(`lstat "/tmp/.snap/etc"`, syscall.ENOENT) s.sys.InsertFault(`lstat "/tmp/.snap/etc/other.conf"`, syscall.ENOENT) s.sys.InsertOsLstatResult(`lstat "/etc"`, testutil.FileInfoDir) - s.sys.InsertOsLstatResult(`lstat "/etc/other.conf"`, otherConf) + otherConfInfo, err := otherConf.Info() + c.Assert(err, IsNil) + s.sys.InsertOsLstatResult(`lstat "/etc/other.conf"`, otherConfInfo) s.sys.InsertFault(`mkdirat 3 "tmp" 0755`, syscall.EEXIST) s.sys.InsertFstatResult(`fstat 5 `, syscall.Stat_t{Mode: syscall.S_IFREG}) s.sys.InsertFstatResult(`fstat 4 `, syscall.Stat_t{Mode: syscall.S_IFDIR}) @@ -2415,7 +2417,7 @@ // For convenience we pretend that /etc is empty. The mimic // replicates /etc in /tmp/.snap/etc for subsequent re-construction. 
{C: `lstat "/etc" `, R: syscall.Stat_t{Mode: 0755}}, - {C: `readdir "/etc"`, R: []os.FileInfo{otherConf}}, + {C: `readdir "/etc"`, R: []fs.DirEntry{otherConf}}, {C: `lstat "/tmp/.snap/etc"`, E: syscall.ENOENT}, {C: `open "/" O_NOFOLLOW|O_CLOEXEC|O_DIRECTORY 0`, R: 3}, {C: `mkdirat 3 "tmp" 0755`, E: syscall.EEXIST}, @@ -2469,7 +2471,7 @@ {C: `lstat "/etc"`, R: testutil.FileInfoDir}, {C: `mount "tmpfs" "/etc" "tmpfs" 0 "mode=0755,uid=0,gid=0"`}, // Here we restore the contents of /etc: here it's just one file - other.conf - {C: `lstat "/etc/other.conf"`, R: otherConf}, + {C: `lstat "/etc/other.conf"`, R: otherConfInfo}, {C: `lstat "/tmp/.snap/etc/other.conf"`, E: syscall.ENOENT}, // Create /tmp/.snap/etc/other.conf as an empty file. @@ -2987,7 +2989,7 @@ s.sys.InsertFault(`mkdirat 6 "rofs" 0755`, syscall.EEXIST) s.sys.InsertFault(`mkdirat 7 "dir" 0755`, syscall.EROFS, nil) s.sys.InsertSysLstatResult(`lstat "/snap/some-snap/x1/rofs" `, syscall.Stat_t{}) - s.sys.InsertReadDirResult(`readdir "/snap/some-snap/x1/rofs"`, []os.FileInfo{}) + s.sys.InsertReadDirResult(`readdir "/snap/some-snap/x1/rofs"`, []fs.DirEntry{}) s.sys.InsertOsLstatResult(`lstat "/tmp/.snap/snap/some-snap/x1/rofs"`, testutil.FileInfoDir) s.sys.InsertOsLstatResult(`lstat "/snap/some-snap/x1/rofs"`, testutil.FileInfoDir) s.sys.InsertFstatResult(`fstat 7 `, syscall.Stat_t{}) @@ -3023,7 +3025,7 @@ s.sys.InsertFault(`mkdirat 6 "rofs" 0755`, syscall.EEXIST) s.sys.InsertFault(`mkdirat 7 "dir" 0755`, syscall.EROFS, nil) s.sys.InsertSysLstatResult(`lstat "/snap/some-snap/x1/rofs" `, syscall.Stat_t{}) - s.sys.InsertReadDirResult(`readdir "/snap/some-snap/x1/rofs"`, []os.FileInfo{}) + s.sys.InsertReadDirResult(`readdir "/snap/some-snap/x1/rofs"`, []fs.DirEntry{}) s.sys.InsertOsLstatResult(`lstat "/tmp/.snap/snap/some-snap/x1/rofs"`, testutil.FileInfoDir) s.sys.InsertOsLstatResult(`lstat "/snap/some-snap/x1/rofs"`, testutil.FileInfoDir) s.sys.InsertFstatResult(`fstat 7 `, syscall.Stat_t{}) diff -Nru snapd-2.62+23.10/cmd/snap-update-ns/export_test.go snapd-2.63+23.10/cmd/snap-update-ns/export_test.go --- snapd-2.62+23.10/cmd/snap-update-ns/export_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-update-ns/export_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,6 +20,7 @@ package main import ( + "io/fs" "os" "syscall" @@ -67,7 +68,7 @@ type SystemCalls interface { OsLstat(name string) (os.FileInfo, error) SysLstat(name string, buf *syscall.Stat_t) error - ReadDir(dirname string) ([]os.FileInfo, error) + ReadDir(dirname string) ([]fs.DirEntry, error) Symlinkat(oldname string, dirfd int, newname string) error Readlinkat(dirfd int, path string, buf []byte) (int, error) Remove(name string) error @@ -89,7 +90,7 @@ // save oldOsLstat := osLstat oldRemove := osRemove - oldIoutilReadDir := ioutilReadDir + oldOsReadDir := osReadDir oldSysClose := sysClose oldSysFchown := sysFchown @@ -108,7 +109,7 @@ // override osLstat = sc.OsLstat osRemove = sc.Remove - ioutilReadDir = sc.ReadDir + osReadDir = sc.ReadDir sysClose = sc.Close sysFchown = sc.Fchown @@ -128,7 +129,7 @@ // restore osLstat = oldOsLstat osRemove = oldRemove - ioutilReadDir = oldIoutilReadDir + osReadDir = oldOsReadDir sysClose = oldSysClose sysFchown = oldSysFchown @@ -184,11 +185,11 @@ } } -func MockReadDir(fn func(string) ([]os.FileInfo, error)) (restore func()) { - old := ioutilReadDir - ioutilReadDir = fn +func MockReadDir(fn func(string) ([]fs.DirEntry, error)) (restore func()) { + old := osReadDir + osReadDir = fn return func() { - ioutilReadDir = old + 
osReadDir = old } } diff -Nru snapd-2.62+23.10/cmd/snap-update-ns/utils.go snapd-2.63+23.10/cmd/snap-update-ns/utils.go --- snapd-2.62+23.10/cmd/snap-update-ns/utils.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-update-ns/utils.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -69,7 +68,7 @@ sysGetuid = sys.Getuid sysGetgid = sys.Getgid - ioutilReadDir = ioutil.ReadDir + osReadDir = os.ReadDir ) // ReadOnlyFsError is an error encapsulating encountered EROFS. @@ -634,7 +633,7 @@ }, }) // Iterate over the items in the original directory (nothing is mounted _yet_). - entries, err := ioutilReadDir(dir) + entries, err := osReadDir(dir) if err != nil { return nil, err } @@ -646,7 +645,7 @@ // Bind mount each element from the safe-keeping directory into the // tmpfs. Our Change.Perform() engine can create the missing // directories automatically so we don't bother creating those. - m := fi.Mode() + m := fi.Type() switch { case m.IsDir(): ch.Entry.Options = []string{"rbind"} diff -Nru snapd-2.62+23.10/cmd/snap-update-ns/utils_test.go snapd-2.63+23.10/cmd/snap-update-ns/utils_test.go --- snapd-2.62+23.10/cmd/snap-update-ns/utils_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snap-update-ns/utils_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,6 +23,7 @@ "bytes" "errors" "fmt" + "io/fs" "os" "path/filepath" "syscall" @@ -686,21 +687,21 @@ func (s *utilsSuite) TestPlanWritableMimic(c *C) { s.sys.InsertSysLstatResult(`lstat "/foo" `, syscall.Stat_t{Uid: 0, Gid: 0, Mode: 0755}) - restore := update.MockReadDir(func(dir string) ([]os.FileInfo, error) { + restore := update.MockReadDir(func(dir string) ([]fs.DirEntry, error) { c.Assert(dir, Equals, "/foo") - return []os.FileInfo{ - testutil.FakeFileInfo("file", 0), - testutil.FakeFileInfo("dir", os.ModeDir), - testutil.FakeFileInfo("symlink", os.ModeSymlink), - testutil.FakeFileInfo("error-symlink-readlink", os.ModeSymlink), + return []fs.DirEntry{ + testutil.FakeDirEntry("file", 0), + testutil.FakeDirEntry("dir", os.ModeDir), + testutil.FakeDirEntry("symlink", os.ModeSymlink), + testutil.FakeDirEntry("error-symlink-readlink", os.ModeSymlink), // NOTE: None of the filesystem entries below are supported because // they cannot be placed inside snaps or can only be created at // runtime in areas that are already writable and this would never // have to be handled in a writable mimic. 
- testutil.FakeFileInfo("block-dev", os.ModeDevice), - testutil.FakeFileInfo("char-dev", os.ModeDevice|os.ModeCharDevice), - testutil.FakeFileInfo("socket", os.ModeSocket), - testutil.FakeFileInfo("pipe", os.ModeNamedPipe), + testutil.FakeDirEntry("block-dev", os.ModeDevice), + testutil.FakeDirEntry("char-dev", os.ModeDevice|os.ModeCharDevice), + testutil.FakeDirEntry("socket", os.ModeSocket), + testutil.FakeDirEntry("pipe", os.ModeNamedPipe), }, nil }) defer restore() @@ -737,7 +738,7 @@ func (s *utilsSuite) TestPlanWritableMimicErrors(c *C) { s.sys.InsertSysLstatResult(`lstat "/foo" `, syscall.Stat_t{Uid: 0, Gid: 0, Mode: 0755}) - restore := update.MockReadDir(func(dir string) ([]os.FileInfo, error) { + restore := update.MockReadDir(func(dir string) ([]fs.DirEntry, error) { c.Assert(dir, Equals, "/foo") return nil, errTesting }) diff -Nru snapd-2.62+23.10/cmd/snapd/main_test.go snapd-2.63+23.10/cmd/snapd/main_test.go --- snapd-2.62+23.10/cmd/snapd/main_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snapd/main_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -63,7 +63,7 @@ func (s *snapdSuite) TestSyscheckFailGoesIntoDegradedMode(c *C) { logbuf, restore := logger.MockLogger() defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() restore = seccomp.MockSnapSeccompVersionInfo("abcdef 1.2.3 1234abcd -") defer restore() diff -Nru snapd-2.62+23.10/cmd/snapd-apparmor/main.go snapd-2.63+23.10/cmd/snapd-apparmor/main.go --- snapd-2.62+23.10/cmd/snapd-apparmor/main.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snapd-apparmor/main.go 2024-04-24 00:00:39.000000000 +0000 @@ -36,7 +36,6 @@ import ( "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -77,7 +76,7 @@ var nsStackedPath = filepath.Join(appArmorSecurityFSPath, ".ns_stacked") var nsNamePath = filepath.Join(appArmorSecurityFSPath, ".ns_name") - contents, err := ioutil.ReadFile(nsStackedPath) + contents, err := os.ReadFile(nsStackedPath) if err != nil && !errors.Is(err, os.ErrNotExist) { logger.Noticef("Failed to read %s: %v", nsStackedPath, err) return false @@ -87,7 +86,7 @@ return false } - contents, err = ioutil.ReadFile(nsNamePath) + contents, err = os.ReadFile(nsNamePath) if err != nil && !errors.Is(err, os.ErrNotExist) { logger.Noticef("Failed to read %s: %v", nsNamePath, err) return false diff -Nru snapd-2.62+23.10/cmd/snaplock/runinhibit/inhibit.go snapd-2.63+23.10/cmd/snaplock/runinhibit/inhibit.go --- snapd-2.62+23.10/cmd/snaplock/runinhibit/inhibit.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/cmd/snaplock/runinhibit/inhibit.go 2024-04-24 00:00:39.000000000 +0000 @@ -25,7 +25,7 @@ "context" "encoding/json" "fmt" - "io/ioutil" + "io" "os" "path/filepath" "time" @@ -383,7 +383,7 @@ } func hintFromFile(hintFile *os.File) (Hint, error) { - buf, err := ioutil.ReadAll(hintFile) + buf, err := io.ReadAll(hintFile) if err != nil { return "", err } @@ -391,7 +391,7 @@ } func readInhibitInfo(snapName string, hint Hint) (InhibitInfo, error) { - buf, err := ioutil.ReadFile(InhibitInfoFile(snapName, hint)) + buf, err := os.ReadFile(InhibitInfoFile(snapName, hint)) if err != nil { return InhibitInfo{}, err } diff -Nru snapd-2.62+23.10/cmd/snaplock/runinhibit/inhibit_test.go snapd-2.63+23.10/cmd/snaplock/runinhibit/inhibit_test.go --- snapd-2.62+23.10/cmd/snaplock/runinhibit/inhibit_test.go 2024-03-21 20:06:09.000000000 +0000 +++ 
snapd-2.63+23.10/cmd/snaplock/runinhibit/inhibit_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "encoding/json" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "testing" @@ -85,7 +84,7 @@ func testInhibitInfo(c *C, snapName, hint string, expectedInfo runinhibit.InhibitInfo) { infoPath := filepath.Join(runinhibit.InhibitDir, fmt.Sprintf("%s.%s", snapName, hint)) var info runinhibit.InhibitInfo - buf, err := ioutil.ReadFile(infoPath) + buf, err := os.ReadFile(infoPath) c.Assert(err, IsNil) c.Assert(json.Unmarshal(buf, &info), IsNil) c.Check(info, Equals, expectedInfo) diff -Nru snapd-2.62+23.10/daemon/access.go snapd-2.63+23.10/daemon/access.go --- snapd-2.62+23.10/daemon/access.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/access.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2021 Canonical Ltd + * Copyright (C) 2021-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -20,7 +20,10 @@ package daemon import ( + "fmt" "net/http" + "os" + "path/filepath" "strconv" "github.com/snapcore/snapd/client" @@ -31,12 +34,15 @@ "github.com/snapcore/snapd/overlord/ifacestate" "github.com/snapcore/snapd/polkit" "github.com/snapcore/snapd/sandbox/cgroup" + "github.com/snapcore/snapd/strutil" ) var polkitCheckAuthorization = polkit.CheckAuthorization var checkPolkitAction = checkPolkitActionImpl +var osReadlink = os.Readlink + func checkPolkitActionImpl(r *http.Request, ucred *ucrednet, action string) *apiError { var flags polkit.CheckFlags allowHeader := r.Header.Get(client.AllowInteractionHeader) @@ -160,7 +166,7 @@ requireInterfaceApiAccess = requireInterfaceApiAccessImpl ) -func requireInterfaceApiAccessImpl(d *Daemon, r *http.Request, ucred *ucrednet, interfaceName string) *apiError { +func requireInterfaceApiAccessImpl(d *Daemon, r *http.Request, ucred *ucrednet, interfaceNames []string) *apiError { if ucred == nil { return Forbidden("access denied") } @@ -189,8 +195,9 @@ if err != nil { return Forbidden("internal error: cannot get connections: %s", err) } + foundMatchingInterface := false for refStr, connState := range conns { - if !connState.Active() || connState.Interface != interfaceName { + if !connState.Active() || !strutil.ListContains(interfaceNames, connState.Interface) { continue } connRef, err := interfaces.ParseConnRef(refStr) @@ -198,35 +205,39 @@ return Forbidden("internal error: %s", err) } if connRef.PlugRef.Snap == snapName { - r.RemoteAddr = ucrednetAttachInterface(r.RemoteAddr, interfaceName) - return nil + r.RemoteAddr = ucrednetAttachInterface(r.RemoteAddr, connState.Interface) + // Do not return here, but keep processing connections for the side + // effect of attaching all connected interfaces we asked for to the + // remote address. + foundMatchingInterface = true } } + if foundMatchingInterface { + return nil + } return Forbidden("access denied") } // interfaceOpenAccess behaves like openAccess, but allows requests from -// snapd-snap.socket for snaps that plug the provided interface. +// snapd-snap.socket for snaps that plug one of the provided interfaces. 
type interfaceOpenAccess struct { - // TODO: allow a list of interfaces - Interface string + Interfaces []string } func (ac interfaceOpenAccess) CheckAccess(d *Daemon, r *http.Request, ucred *ucrednet, user *auth.UserState) *apiError { - return requireInterfaceApiAccess(d, r, ucred, ac.Interface) + return requireInterfaceApiAccess(d, r, ucred, ac.Interfaces) } // interfaceAuthenticatedAccess behaves like authenticatedAccess, but -// allows requests from snapd-snap.socket that plug the provided -// interface. +// allows requests from snapd-snap.socket that plug one of the provided +// interfaces. type interfaceAuthenticatedAccess struct { - // TODO: allow a list of interfaces - Interface string - Polkit string + Interfaces []string + Polkit string } func (ac interfaceAuthenticatedAccess) CheckAccess(d *Daemon, r *http.Request, ucred *ucrednet, user *auth.UserState) *apiError { - if rspe := requireInterfaceApiAccess(d, r, ucred, ac.Interface); rspe != nil { + if rspe := requireInterfaceApiAccess(d, r, ucred, ac.Interfaces); rspe != nil { return rspe } @@ -249,3 +260,41 @@ return Unauthorized("access denied") } + +// isRequestFromSnapCmd checks that the request is coming from the snap command. +// +// It checks that the request process "/proc/PID/exe" points to one of the +// known locations of the snap command. This is not a security-oriented check. +func isRequestFromSnapCmd(r *http.Request) (bool, error) { + ucred, err := ucrednetGet(r.RemoteAddr) + if err != nil { + return false, err + } + exe, err := osReadlink(fmt.Sprintf("/proc/%d/exe", ucred.Pid)) + if err != nil { + return false, err + } + + // SNAP_REEXEC=0 + if exe == filepath.Join(dirs.GlobalRootDir, "/usr/bin/snap") { + return true, nil + } + + // Check if re-exec in snapd + path := filepath.Join(dirs.SnapMountDir, "snapd/*/usr/bin/snap") + if matched, err := filepath.Match(path, exe); err != nil { + return false, err + } else if matched { + return true, nil + } + + // Check if re-exec in core + path = filepath.Join(dirs.SnapMountDir, "core/*/usr/bin/snap") + if matched, err := filepath.Match(path, exe); err != nil { + return false, err + } else if matched { + return true, nil + } + + return false, nil +} diff -Nru snapd-2.62+23.10/daemon/access_test.go snapd-2.63+23.10/daemon/access_test.go --- snapd-2.62+23.10/daemon/access_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/access_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2021 Canonical Ltd + * Copyright (C) 2021-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -235,12 +235,14 @@ version: 1 slots: snap-themes-control: + snap-refresh-control: `) s.mockSnap(c, ` name: some-snap version: 1 plugs: snap-themes-control: + snap-refresh-control: `) restore := daemon.MockCgroupSnapNameFromPid(func(pid int) (string, error) { @@ -251,7 +253,7 @@ }) defer restore() - var ac daemon.AccessChecker = daemon.InterfaceOpenAccess{Interface: "snap-themes-control"} + var ac daemon.AccessChecker = daemon.InterfaceOpenAccess{Interfaces: []string{"snap-themes-control", "snap-refresh-control"}} // Access with no ucred data is forbidden c.Check(ac.CheckAccess(d, nil, nil, nil), DeepEquals, errForbidden) @@ -290,6 +292,23 @@ // Interface is attached to RemoteAddr c.Check(req.RemoteAddr, Equals, fmt.Sprintf("%siface=snap-themes-control;", ucred)) + // Now connect both interfaces + st.Lock() + 
st.Set("conns", map[string]interface{}{ + "some-snap:snap-themes-control core:snap-themes-control": map[string]interface{}{ + "interface": "snap-themes-control", + }, + "some-snap:snap-refresh-control core:snap-refresh-control": map[string]interface{}{ + "interface": "snap-refresh-control", + }, + }) + st.Unlock() + req = http.Request{RemoteAddr: ucred.String()} + c.Check(ac.CheckAccess(s.d, &req, ucred, nil), IsNil) + // Check that both interfaces are attached to RemoteAddr. + // Since conns is a map, order is not guaranteed. + c.Check(req.RemoteAddr, Matches, fmt.Sprintf("^%siface=(snap-themes-control&snap-refresh-control|snap-refresh-control&snap-themes-control);$", ucred)) + // A left over "undesired" connection does not grant access st.Lock() st.Set("conns", map[string]interface{}{ @@ -305,12 +324,12 @@ } func (s *accessSuite) TestInterfaceOpenAccess(c *C) { - var ac daemon.AccessChecker = daemon.InterfaceOpenAccess{Interface: "snap-themes-control"} + var ac daemon.AccessChecker = daemon.InterfaceOpenAccess{Interfaces: []string{"snap-themes-control"}} s.daemon(c) // interfaceOpenAccess allows access if requireInterfaceApiAccess() succeeds ucred := &daemon.Ucrednet{Uid: 42, Pid: 100, Socket: dirs.SnapSocket} - restore := daemon.MockRequireInterfaceApiAccess(func(d *daemon.Daemon, r *http.Request, u *daemon.Ucrednet, interfaceName string) *daemon.APIError { + restore := daemon.MockRequireInterfaceApiAccess(func(d *daemon.Daemon, r *http.Request, u *daemon.Ucrednet, interfaceNames []string) *daemon.APIError { c.Check(d, Equals, s.d) c.Check(u, Equals, ucred) return nil @@ -319,7 +338,7 @@ c.Check(ac.CheckAccess(s.d, nil, ucred, nil), IsNil) // Access is forbidden if requireInterfaceApiAccess() fails - restore = daemon.MockRequireInterfaceApiAccess(func(d *daemon.Daemon, r *http.Request, u *daemon.Ucrednet, interfaceName string) *daemon.APIError { + restore = daemon.MockRequireInterfaceApiAccess(func(d *daemon.Daemon, r *http.Request, u *daemon.Ucrednet, interfaceNames []string) *daemon.APIError { return errForbidden }) defer restore() @@ -342,7 +361,7 @@ // themesAuthenticatedAccess denies access if requireInterfaceApiAccess fails ucred := &daemon.Ucrednet{Uid: 0, Pid: 100, Socket: dirs.SnapSocket} - restore = daemon.MockRequireInterfaceApiAccess(func(d *daemon.Daemon, r *http.Request, u *daemon.Ucrednet, interfaceName string) *daemon.APIError { + restore = daemon.MockRequireInterfaceApiAccess(func(d *daemon.Daemon, r *http.Request, u *daemon.Ucrednet, interfaceNames []string) *daemon.APIError { c.Check(d, Equals, s.d) c.Check(u, Equals, ucred) return errForbidden @@ -352,7 +371,7 @@ c.Check(ac.CheckAccess(s.d, req, ucred, user), DeepEquals, errForbidden) // If requireInterfaceApiAccess succeeds, root is granted access - restore = daemon.MockRequireInterfaceApiAccess(func(d *daemon.Daemon, r *http.Request, u *daemon.Ucrednet, interfaceName string) *daemon.APIError { + restore = daemon.MockRequireInterfaceApiAccess(func(d *daemon.Daemon, r *http.Request, u *daemon.Ucrednet, interfaceNames []string) *daemon.APIError { return nil }) defer restore() @@ -374,7 +393,7 @@ ucred := &daemon.Ucrednet{Uid: 0, Pid: 100, Socket: dirs.SnapSocket} s.daemon(c) - restore := daemon.MockRequireInterfaceApiAccess(func(d *daemon.Daemon, r *http.Request, u *daemon.Ucrednet, interfaceName string) *daemon.APIError { + restore := daemon.MockRequireInterfaceApiAccess(func(d *daemon.Daemon, r *http.Request, u *daemon.Ucrednet, interfaceNames []string) *daemon.APIError { c.Check(d, Equals, s.d) c.Check(u, Equals, 
ucred) return nil diff -Nru snapd-2.62+23.10/daemon/api_accessories.go snapd-2.63+23.10/daemon/api_accessories.go --- snapd-2.62+23.10/daemon/api_accessories.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_accessories.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2021 Canonical Ltd + * Copyright (C) 2021-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -30,7 +30,7 @@ Path: "/v2/accessories/changes/{id}", GET: getAccessoriesChange, // TODO: expand this to other accessories APIs as they appear - ReadAccess: interfaceOpenAccess{Interface: "snap-themes-control"}, + ReadAccess: interfaceOpenAccess{Interfaces: []string{"snap-themes-control"}}, } ) diff -Nru snapd-2.62+23.10/daemon/api_accessories_test.go snapd-2.63+23.10/daemon/api_accessories_test.go --- snapd-2.62+23.10/daemon/api_accessories_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_accessories_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2021 Canonical Ltd + * Copyright (C) 2021-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -35,7 +35,7 @@ } func (s *accessoriesSuite) expectThemesAccess() { - s.expectReadAccess(daemon.InterfaceOpenAccess{Interface: "snap-themes-control"}) + s.expectReadAccess(daemon.InterfaceOpenAccess{Interfaces: []string{"snap-themes-control"}}) } func (s *accessoriesSuite) TestChangeInfo(c *C) { diff -Nru snapd-2.62+23.10/daemon/api_apps.go snapd-2.63+23.10/daemon/api_apps.go --- snapd-2.62+23.10/daemon/api_apps.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_apps.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,10 +20,12 @@ package daemon import ( + "context" "encoding/json" "fmt" "io" "net/http" + "net/url" "os/user" "sort" "strconv" @@ -54,9 +56,27 @@ } ) +var newStatusDecorator = func(ctx context.Context, isGlobal bool, uid string) clientutil.StatusDecorator { + if isGlobal { + return servicestate.NewStatusDecorator(progress.Null) + } else { + return servicestate.NewStatusDecoratorForUid(progress.Null, ctx, uid) + } +} + +func readMaybeBoolValue(query url.Values, name string) (bool, error) { + if sel := query.Get(name); sel != "" { + if v, err := strconv.ParseBool(sel); err != nil { + return false, fmt.Errorf("invalid %s parameter: %q", name, sel) + } else { + return v, nil + } + } + return false, nil +} + func getAppsInfo(c *Command, r *http.Request, user *auth.UserState) Response { query := r.URL.Query() - opts := appInfoOptions{} switch sel := query.Get("select"); sel { case "": @@ -67,13 +87,22 @@ return BadRequest("invalid select parameter: %q", sel) } + global, err := readMaybeBoolValue(query, "global") + if err != nil { + return BadRequest(err.Error()) + } + appInfos, rspe := appInfosFor(c.d.overlord.State(), strutil.CommaSeparatedList(query.Get("names")), opts) if rspe != nil { return rspe } - sd := servicestate.NewStatusDecorator(progress.Null) + u, err := systemUserFromRequest(r) + if err != nil { + return BadRequest("cannot retrieve services: %v", err) + } + sd := newStatusDecorator(r.Context(), global, u.Uid) clientAppInfos, err := clientutil.ClientAppInfosFromSnapAppInfos(appInfos, sd) if err != nil { return InternalError("%v", err) diff -Nru 
snapd-2.62+23.10/daemon/api_apps_test.go snapd-2.63+23.10/daemon/api_apps_test.go --- snapd-2.62+23.10/daemon/api_apps_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_apps_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,11 +21,11 @@ import ( "bytes" + "context" "encoding/json" "errors" "fmt" "io" - "io/ioutil" "math" "net/http" "net/http/httptest" @@ -37,6 +37,7 @@ "gopkg.in/check.v1" "github.com/snapcore/snapd/client" + "github.com/snapcore/snapd/client/clientutil" "github.com/snapcore/snapd/daemon" "github.com/snapcore/snapd/overlord/hookstate" "github.com/snapcore/snapd/overlord/servicestate" @@ -49,6 +50,12 @@ var _ = check.Suite(&appsSuite{}) +type appsSuiteDecoratorResult struct { + daemonType string + active bool + enabled bool +} + type appsSuite struct { apiBaseSuite @@ -59,6 +66,7 @@ jctlNamespaces []bool jctlRCs []io.ReadCloser jctlErrs []error + decoratorResults map[string]appsSuiteDecoratorResult serviceControlError error serviceControlCalls []serviceControlArgs @@ -132,7 +140,9 @@ } func (s *appsSuite) TearDownSuite(c *check.C) { - s.journalctlRestorer() + if s.journalctlRestorer != nil { + s.journalctlRestorer() + } s.apiBaseSuite.TearDownSuite(c) } @@ -213,7 +223,7 @@ svcNames = append(svcNames, "snap-e.svc4") s.SysctlBufs = append(s.SysctlBufs, []byte("enabled\n")) - req, err := http.NewRequest("GET", "/v2/apps", nil) + req, err := http.NewRequest("GET", "/v2/apps?global=true", nil) c.Assert(err, check.IsNil) rsp := s.syncReq(c, req, nil) @@ -285,6 +295,77 @@ } func (s *appsSuite) TestGetAppsInfoServices(c *check.C) { + r := daemon.MockNewStatusDecorator(func(ctx context.Context, isGlobal bool, uid string) clientutil.StatusDecorator { + c.Check(isGlobal, check.Equals, false) + c.Check(uid, check.Equals, "0") + return s + }) + defer r() + + // System and user services from active snaps + s.decoratorResults = map[string]appsSuiteDecoratorResult{ + "snap-a.svc1": { + daemonType: "simple", + active: true, + enabled: true, + }, + "snap-a.svc2": { + daemonType: "simple", + active: true, + enabled: true, + }, + "snap-e.svc4": { + daemonType: "simple", + active: false, + enabled: true, + }, + } + + // System services from active snaps + svcNames := []string{"snap-a.svc1", "snap-a.svc2"} + // System services from inactive snaps + svcNames = append(svcNames, "snap-b.svc3") + // User services from active snaps + svcNames = append(svcNames, "snap-e.svc4") + + req, err := http.NewRequest("GET", "/v2/apps?select=service", nil) + c.Assert(err, check.IsNil) + + rsp := s.syncReq(c, req, nil) + c.Assert(rsp.Status, check.Equals, 200) + c.Assert(rsp.Result, check.FitsTypeOf, []client.AppInfo{}) + svcs := rsp.Result.([]client.AppInfo) + c.Assert(svcs, check.HasLen, 4) + + for _, name := range svcNames { + snapName, app := daemon.SplitAppName(name) + needle := client.AppInfo{ + Snap: snapName, + Name: app, + Daemon: "simple", + DaemonScope: snap.SystemDaemon, + } + if snapName != "snap-b" { + // snap-b is not active (all the others are) + needle.Active = true + needle.Enabled = true + } + if snapName == "snap-e" { + // snap-e contains user services + needle.DaemonScope = snap.UserDaemon + needle.Active = false + } + c.Check(svcs, testutil.DeepContains, needle) + } + + appNames := make([]string, len(svcs)) + for i, svc := range svcs { + appNames[i] = svc.Snap + "." 
+ svc.Name + } + c.Check(sort.StringsAreSorted(appNames), check.Equals, true) +} + +func (s *appsSuite) TestGetAppsInfoServicesWithGlobal(c *check.C) { // System services from active snaps svcNames := []string{"snap-a.svc1", "snap-a.svc2"} for _, name := range svcNames { @@ -303,8 +384,9 @@ svcNames = append(svcNames, "snap-e.svc4") s.SysctlBufs = append(s.SysctlBufs, []byte("enabled\n")) - req, err := http.NewRequest("GET", "/v2/apps?select=service", nil) + req, err := http.NewRequest("GET", "/v2/apps?select=service&global=true", nil) c.Assert(err, check.IsNil) + s.asUserAuth(c, req) rsp := s.syncReq(c, req, nil) c.Assert(rsp.Status, check.Equals, 200) @@ -340,6 +422,84 @@ c.Check(sort.StringsAreSorted(appNames), check.Equals, true) } +func (s *appsSuite) DecorateWithStatus(appInfo *client.AppInfo, snapApp *snap.AppInfo) error { + name := snapApp.Snap.RealName + "." + appInfo.Name + dec, ok := s.decoratorResults[name] + if !ok { + return fmt.Errorf("%s not found in expected test decorator results", name) + } + appInfo.Daemon = dec.daemonType + appInfo.Enabled = dec.enabled + appInfo.Active = dec.active + return nil +} + +func (s *appsSuite) TestGetUserAppsInfoServices(c *check.C) { + r := daemon.MockNewStatusDecorator(func(ctx context.Context, isGlobal bool, uid string) clientutil.StatusDecorator { + c.Check(isGlobal, check.Equals, false) + c.Check(uid, check.Equals, "1337") + return s + }) + defer r() + + // System and user services from active snaps + s.decoratorResults = map[string]appsSuiteDecoratorResult{ + "snap-a.svc1": { + daemonType: "simple", + active: true, + enabled: true, + }, + "snap-a.svc2": { + daemonType: "simple", + active: true, + enabled: true, + }, + "snap-e.svc4": { + daemonType: "simple", + active: true, + enabled: true, + }, + } + + // Perform the request as a non-root uid + req, err := http.NewRequest("GET", "/v2/apps?select=service", nil) + c.Assert(err, check.IsNil) + s.asUserAuth(c, req) + + rsp := s.syncReq(c, req, nil) + c.Assert(rsp.Status, check.Equals, 200) + c.Assert(rsp.Result, check.FitsTypeOf, []client.AppInfo{}) + svcs := rsp.Result.([]client.AppInfo) + c.Assert(svcs, check.HasLen, 4) + + for name := range s.decoratorResults { + snapName, app := daemon.SplitAppName(name) + needle := client.AppInfo{ + Snap: snapName, + Name: app, + Daemon: "simple", + DaemonScope: snap.SystemDaemon, + } + if snapName != "snap-b" { + // snap-b is not active (all the others are) + needle.Active = true + needle.Enabled = true + } + if snapName == "snap-e" { + // snap-e contains user services + needle.DaemonScope = snap.UserDaemon + needle.Active = true + } + c.Check(svcs, testutil.DeepContains, needle) + } + + appNames := make([]string, len(svcs)) + for i, svc := range svcs { + appNames[i] = svc.Snap + "." 
+ svc.Name + } + c.Check(sort.StringsAreSorted(appNames), check.Equals, true) +} + func (s *appsSuite) TestGetAppsInfoBadSelect(c *check.C) { req, err := http.NewRequest("GET", "/v2/apps?select=potato", nil) c.Assert(err, check.IsNil) @@ -811,7 +971,7 @@ func (s *appsSuite) TestLogs(c *check.C) { s.expectLogsAccess() - s.jctlRCs = []io.ReadCloser{ioutil.NopCloser(strings.NewReader(` + s.jctlRCs = []io.ReadCloser{io.NopCloser(strings.NewReader(` {"MESSAGE": "hello1", "SYSLOG_IDENTIFIER": "xyzzy", "_PID": "42", "__REALTIME_TIMESTAMP": "42"} {"MESSAGE": "hello2", "SYSLOG_IDENTIFIER": "xyzzy", "_PID": "42", "__REALTIME_TIMESTAMP": "44"} {"MESSAGE": "hello3", "SYSLOG_IDENTIFIER": "xyzzy", "_PID": "42", "__REALTIME_TIMESTAMP": "46"} @@ -847,7 +1007,7 @@ s.expectLogsAccess() - s.jctlRCs = []io.ReadCloser{ioutil.NopCloser(strings.NewReader(""))} + s.jctlRCs = []io.ReadCloser{io.NopCloser(strings.NewReader(""))} req, err := http.NewRequest("GET", "/v2/logs?names=snap-a.svc2&n=42&follow=false", nil) c.Assert(err, check.IsNil) @@ -871,7 +1031,7 @@ s.expectLogsAccess() - s.jctlRCs = []io.ReadCloser{ioutil.NopCloser(strings.NewReader(""))} + s.jctlRCs = []io.ReadCloser{io.NopCloser(strings.NewReader(""))} req, err := http.NewRequest("GET", "/v2/logs?names=snap-a.svc2&n=42&follow=false", nil) c.Assert(err, check.IsNil) @@ -905,7 +1065,7 @@ {in: strconv.Itoa(math.MaxInt32), out: math.MaxInt32}, } { - s.jctlRCs = []io.ReadCloser{ioutil.NopCloser(strings.NewReader(""))} + s.jctlRCs = []io.ReadCloser{io.NopCloser(strings.NewReader(""))} s.jctlNs = nil req, err := http.NewRequest("GET", "/v2/logs?n="+t.in, nil) @@ -932,9 +1092,9 @@ s.expectLogsAccess() s.jctlRCs = []io.ReadCloser{ - ioutil.NopCloser(strings.NewReader("")), - ioutil.NopCloser(strings.NewReader("")), - ioutil.NopCloser(strings.NewReader("")), + io.NopCloser(strings.NewReader("")), + io.NopCloser(strings.NewReader("")), + io.NopCloser(strings.NewReader("")), } reqT, err := http.NewRequest("GET", "/v2/logs?follow=true", nil) diff -Nru snapd-2.62+23.10/daemon/api_base_test.go snapd-2.63+23.10/daemon/api_base_test.go --- snapd-2.62+23.10/daemon/api_base_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_base_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "context" "crypto" "fmt" - "io/ioutil" "net/http" "os" "os/user" @@ -537,7 +536,7 @@ }, nil, "") c.Assert(err, check.IsNil) - content, err := ioutil.ReadFile(snapInfo.MountFile()) + content, err := os.ReadFile(snapInfo.MountFile()) c.Assert(err, check.IsNil) h := sha3.Sum384(content) dgst, err := asserts.EncodeDigest(crypto.SHA3_384, h[:]) diff -Nru snapd-2.62+23.10/daemon/api_debug_pprof_test.go snapd-2.63+23.10/daemon/api_debug_pprof_test.go --- snapd-2.62+23.10/daemon/api_debug_pprof_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_debug_pprof_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,9 +21,10 @@ import ( "bytes" - "io/ioutil" + "io" "net/http" "net/http/httptest" + "os" "gopkg.in/check.v1" ) @@ -49,10 +50,10 @@ c.Assert(rsp, check.NotNil) c.Assert(rsp.StatusCode, check.Equals, 200) - data, err := ioutil.ReadAll(rsp.Body) + data, err := io.ReadAll(rsp.Body) c.Assert(err, check.IsNil) - cmdline, err := ioutil.ReadFile("/proc/self/cmdline") + cmdline, err := os.ReadFile("/proc/self/cmdline") c.Assert(err, check.IsNil) cmdline = bytes.TrimRight(cmdline, "\x00") c.Assert(string(data), check.DeepEquals, string(cmdline)) diff -Nru snapd-2.62+23.10/daemon/api_download_test.go snapd-2.63+23.10/daemon/api_download_test.go --- 
snapd-2.62+23.10/daemon/api_download_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_download_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -25,7 +25,6 @@ "encoding/base64" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "strings" @@ -155,7 +154,7 @@ if resume > 0 { status = 206 } - return ioutil.NopCloser(bytes.NewReader([]byte(snapContent[resume:]))), status, nil + return io.NopCloser(bytes.NewReader([]byte(snapContent[resume:]))), status, nil } panic(fmt.Sprintf("internal error: trying to download %s but not in storeSnaps", name)) } diff -Nru snapd-2.62+23.10/daemon/api_general.go snapd-2.63+23.10/daemon/api_general.go --- snapd-2.62+23.10/daemon/api_general.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_general.go 2024-04-24 00:00:39.000000000 +0000 @@ -61,14 +61,14 @@ Path: "/v2/changes/{id}", GET: getChange, POST: abortChange, - ReadAccess: interfaceOpenAccess{Interface: "snap-refresh-observe"}, + ReadAccess: interfaceOpenAccess{Interfaces: []string{"snap-refresh-observe"}}, WriteAccess: authenticatedAccess{Polkit: polkitActionManage}, } stateChangesCmd = &Command{ Path: "/v2/changes", GET: getChanges, - ReadAccess: interfaceOpenAccess{Interface: "snap-refresh-observe"}, + ReadAccess: interfaceOpenAccess{Interfaces: []string{"snap-refresh-observe"}}, } warningsCmd = &Command{ diff -Nru snapd-2.62+23.10/daemon/api_general_test.go snapd-2.63+23.10/daemon/api_general_test.go --- snapd-2.62+23.10/daemon/api_general_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_general_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -52,7 +52,7 @@ } func (s *generalSuite) expectChangesReadAccess() { - s.expectReadAccess(daemon.InterfaceOpenAccess{Interface: "snap-refresh-observe"}) + s.expectReadAccess(daemon.InterfaceOpenAccess{Interfaces: []string{"snap-refresh-observe"}}) } func (s *generalSuite) TestRoot(c *check.C) { diff -Nru snapd-2.62+23.10/daemon/api_notices.go snapd-2.63+23.10/daemon/api_notices.go --- snapd-2.62+23.10/daemon/api_notices.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_notices.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,4 +1,4 @@ -// Copyright (c) 2023 Canonical Ltd +// Copyright (c) 2023-2024 Canonical Ltd // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License version 3 as @@ -16,6 +16,7 @@ import ( "context" + "encoding/json" "errors" "fmt" "math" @@ -25,28 +26,38 @@ "github.com/snapcore/snapd/dirs" "github.com/snapcore/snapd/overlord/auth" "github.com/snapcore/snapd/overlord/state" + "github.com/snapcore/snapd/snap/naming" "github.com/snapcore/snapd/strutil" ) var noticeReadInterfaces = map[state.NoticeType][]string{ state.ChangeUpdateNotice: {"snap-refresh-observe"}, state.RefreshInhibitNotice: {"snap-refresh-observe"}, + state.SnapRunInhibitNotice: {"snap-refresh-observe"}, } var ( noticesCmd = &Command{ - Path: "/v2/notices", - GET: getNotices, - ReadAccess: interfaceOpenAccess{Interface: "snap-refresh-observe"}, + Path: "/v2/notices", + GET: getNotices, + POST: postNotices, + ReadAccess: interfaceOpenAccess{Interfaces: []string{"snap-refresh-observe"}}, + WriteAccess: openAccess{}, } noticeCmd = &Command{ Path: "/v2/notices/{id}", GET: getNotice, - ReadAccess: interfaceOpenAccess{Interface: "snap-refresh-observe"}, + ReadAccess: interfaceOpenAccess{Interfaces: []string{"snap-refresh-observe"}}, } ) +// addedNotice is the result of adding a new notice. 
+type addedNotice struct { + // ID is the id of the newly added notice. + ID string `json:"id"` +} + func getNotices(c *Command, r *http.Request, user *auth.UserState) Response { query := r.URL.Query() @@ -172,6 +183,7 @@ // Construct the types filter which will be passed to state.Notices. func sanitizeNoticeTypesFilter(queryTypes []string, r *http.Request) ([]state.NoticeType, error) { typeStrs := strutil.MultiCommaSeparatedList(queryTypes) + alreadySeen := make(map[state.NoticeType]bool, len(typeStrs)) types := make([]state.NoticeType, 0, len(typeStrs)) for _, typeStr := range typeStrs { noticeType := state.NoticeType(typeStr) @@ -180,6 +192,10 @@ // with unknown types succeed). continue } + if alreadySeen[noticeType] { + continue + } + alreadySeen[noticeType] = true types = append(types, noticeType) } if len(types) == 0 { @@ -188,7 +204,7 @@ } // No types were specified, populate with notice types snap can view // with its connected interface. - ucred, iface, err := ucrednetGetWithInterface(r.RemoteAddr) + ucred, ifaces, err := ucrednetGetWithInterfaces(r.RemoteAddr) if err != nil { return nil, err } @@ -196,7 +212,16 @@ // Not connecting through snapd-snap.socket, should have read-access to all types. return nil, nil } - types = allowedNoticeTypesForInterface(iface) + for _, iface := range ifaces { + ifaceNoticeTypes := allowedNoticeTypesForInterface(iface) + for _, t := range ifaceNoticeTypes { + if alreadySeen[t] { + continue + } + alreadySeen[t] = true + types = append(types, t) + } + } if len(types) == 0 { return nil, errors.New("snap cannot access any notice type") } @@ -218,6 +243,71 @@ return types } +func postNotices(c *Command, r *http.Request, user *auth.UserState) Response { + requestUID, err := uidFromRequest(r) + if err != nil { + return Forbidden("cannot determine UID of request, so cannot create notice") + } + + decoder := json.NewDecoder(r.Body) + var inst noticeInstruction + if err := decoder.Decode(&inst); err != nil { + return BadRequest("cannot decode request body into notice instruction: %v", err) + } + + st := c.d.overlord.State() + st.Lock() + defer st.Unlock() + + if err := inst.validate(r); err != nil { + return err + } + + noticeId, err := st.AddNotice(&requestUID, state.SnapRunInhibitNotice, inst.Key, nil) + if err != nil { + return InternalError("%v", err) + } + + return SyncResponse(addedNotice{ID: noticeId}) +} + +type noticeInstruction struct { + Action string `json:"action"` + Type state.NoticeType `json:"type"` + Key string `json:"key"` + // NOTE: Data and RepeatAfter fields are not needed for snap-run-inhibit notices. 
+} + +func (inst *noticeInstruction) validate(r *http.Request) *apiError { + if inst.Action != "add" { + return BadRequest("invalid action %q", inst.Action) + } + if err := state.ValidateNotice(inst.Type, inst.Key, nil); err != nil { + return BadRequest("%s", err) + } + + switch inst.Type { + case state.SnapRunInhibitNotice: + return inst.validateSnapRunInhibitNotice(r) + default: + return BadRequest(`cannot add notice with invalid type %q (can only add "snap-run-inhibit" notices)`, inst.Type) + } +} + +func (inst *noticeInstruction) validateSnapRunInhibitNotice(r *http.Request) *apiError { + if fromSnapCmd, err := isRequestFromSnapCmd(r); err != nil { + return InternalError("cannot check request source: %v", err) + } else if !fromSnapCmd { + return Forbidden("only snap command can record notices") + } + + if err := naming.ValidateInstance(inst.Key); err != nil { + return BadRequest("invalid key: %v", err) + } + + return nil +} + func getNotice(c *Command, r *http.Request, user *auth.UserState) Response { requestUID, err := uidFromRequest(r) if err != nil { @@ -259,7 +349,7 @@ // noticeTypesViewableBySnap checks if passed interface allows the snap // to have read-access for the passed notice types. func noticeTypesViewableBySnap(types []state.NoticeType, r *http.Request) bool { - ucred, iface, err := ucrednetGetWithInterface(r.RemoteAddr) + ucred, ifaces, err := ucrednetGetWithInterfaces(r.RemoteAddr) if err != nil { return false } @@ -273,11 +363,15 @@ return false } +InterfaceTypeLoop: for _, noticeType := range types { allowedInterfaces := noticeReadInterfaces[noticeType] - if !strutil.ListContains(allowedInterfaces, iface) { - return false + for _, iface := range ifaces { + if strutil.ListContains(allowedInterfaces, iface) { + continue InterfaceTypeLoop + } } + return false } return true } diff -Nru snapd-2.62+23.10/daemon/api_notices_test.go snapd-2.63+23.10/daemon/api_notices_test.go --- snapd-2.62+23.10/daemon/api_notices_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_notices_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,4 +1,4 @@ -// Copyright (c) 2023 Canonical Ltd +// Copyright (c) 2023-2024 Canonical Ltd // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License version 3 as @@ -15,19 +15,25 @@ package daemon_test import ( + "bytes" "context" "encoding/json" "fmt" "net/http" "net/url" + "path/filepath" "strconv" + "strings" "time" . 
"gopkg.in/check.v1" "github.com/snapcore/snapd/daemon" "github.com/snapcore/snapd/dirs" + "github.com/snapcore/snapd/overlord/snapstate" + "github.com/snapcore/snapd/overlord/snapstate/snapstatetest" "github.com/snapcore/snapd/overlord/state" + "github.com/snapcore/snapd/snap" "github.com/snapcore/snapd/testutil" ) @@ -40,7 +46,8 @@ func (s *noticesSuite) SetUpTest(c *C) { s.apiBaseSuite.SetUpTest(c) - s.expectReadAccess(daemon.InterfaceOpenAccess{Interface: "snap-refresh-observe"}) + s.expectReadAccess(daemon.InterfaceOpenAccess{Interfaces: []string{"snap-refresh-observe"}}) + s.expectWriteAccess(daemon.OpenAccess{}) } func (s *noticesSuite) TestNoticesFilterUserID(c *C) { @@ -143,7 +150,7 @@ addNotice(c, st, nil, state.RefreshInhibitNotice, "-", nil) st.Unlock() - req, err := http.NewRequest("GET", "/v2/notices?types=change-update&types=warning&types=refresh-inhibit", nil) + req, err := http.NewRequest("GET", "/v2/notices?types=change-update&types=warning,warning&types=refresh-inhibit", nil) c.Assert(err, IsNil) req.RemoteAddr = fmt.Sprintf("pid=100;uid=1000;socket=%s;", dirs.SnapdSocket) rsp := s.syncReq(c, req, nil) @@ -232,6 +239,7 @@ addNotice(c, st, nil, state.ChangeUpdateNotice, "123", nil) addNotice(c, st, nil, state.RefreshInhibitNotice, "-", nil) addNotice(c, st, nil, state.WarningNotice, "danger", nil) + addNotice(c, st, nil, state.SnapRunInhibitNotice, "snap-name", nil) st.Unlock() // Check that a snap request without specifying types filter only shows @@ -255,7 +263,7 @@ c.Check(rsp.Status, Equals, 200) notices, ok = rsp.Result.([]*state.Notice) c.Assert(ok, Equals, true) - c.Assert(notices, HasLen, 2) + c.Assert(notices, HasLen, 3) seenNoticeType := make(map[string]int) for _, notice := range notices { @@ -265,6 +273,7 @@ } c.Check(seenNoticeType["change-update"], Equals, 1) c.Check(seenNoticeType["refresh-inhibit"], Equals, 1) + c.Check(seenNoticeType["snap-run-inhibit"], Equals, 1) } func (s *noticesSuite) TestNoticesFilterTypesForSnap(c *C) { @@ -275,20 +284,21 @@ addNotice(c, st, nil, state.ChangeUpdateNotice, "123", nil) addNotice(c, st, nil, state.RefreshInhibitNotice, "-", nil) addNotice(c, st, nil, state.WarningNotice, "danger", nil) + addNotice(c, st, nil, state.SnapRunInhibitNotice, "snap-name", nil) st.Unlock() // Check that a snap request with types filter allows access to // snaps with required interfaces only. 
- // snap-refresh-observe interface allows accessing change-update notices - req, err := http.NewRequest("GET", "/v2/notices?types=change-update,refresh-inhibit", nil) + // snap-refresh-observe interface allows accessing change-update, refresh-inhibit and snap-run-inhibit notices + req, err := http.NewRequest("GET", "/v2/notices?types=change-update,refresh-inhibit,snap-run-inhibit", nil) c.Assert(err, IsNil) req.RemoteAddr = fmt.Sprintf("pid=100;uid=1000;socket=%s;iface=snap-refresh-observe;", dirs.SnapSocket) rsp := s.syncReq(c, req, nil) c.Check(rsp.Status, Equals, 200) notices, ok := rsp.Result.([]*state.Notice) c.Assert(ok, Equals, true) - c.Assert(notices, HasLen, 2) + c.Assert(notices, HasLen, 3) seenNoticeType := make(map[string]int) for _, notice := range notices { @@ -298,6 +308,7 @@ } c.Check(seenNoticeType["change-update"], Equals, 1) c.Check(seenNoticeType["refresh-inhibit"], Equals, 1) + c.Check(seenNoticeType["snap-run-inhibit"], Equals, 1) } func (s *noticesSuite) TestNoticesFilterTypesForSnapForbidden(c *C) { @@ -308,6 +319,7 @@ addNotice(c, st, nil, state.ChangeUpdateNotice, "123", nil) addNotice(c, st, nil, state.RefreshInhibitNotice, "-", nil) addNotice(c, st, nil, state.WarningNotice, "danger", nil) + addNotice(c, st, nil, state.SnapRunInhibitNotice, "snap-name", nil) st.Unlock() // Check that a snap request with types filter denies access to @@ -341,8 +353,15 @@ rsp = s.errorReq(c, req, nil) c.Check(rsp.Status, Equals, 403) + // snap-themes-control doesn't give access to snap-run-inhibit notices. + req, err = http.NewRequest("GET", "/v2/notices?types=snap-run-inhibit", nil) + c.Assert(err, IsNil) + req.RemoteAddr = fmt.Sprintf("pid=100;uid=1000;socket=%s;iface=snap-themes-control;", dirs.SnapSocket) + rsp = s.errorReq(c, req, nil) + c.Check(rsp.Status, Equals, 403) + // No interfaces connected. - req, err = http.NewRequest("GET", "/v2/notices?types=change-update,refresh-inhibit", nil) + req, err = http.NewRequest("GET", "/v2/notices?types=change-update,refresh-inhibit,snap-run-inhibit", nil) c.Assert(err, IsNil) req.RemoteAddr = fmt.Sprintf("pid=100;uid=1000;socket=%s;iface=;", dirs.SnapSocket) rsp = s.errorReq(c, req, nil) @@ -663,6 +682,314 @@ c.Assert(rsp.Message, Matches, errorMatch) } +// Check that duplicate explicitly-given notice types are removed from filter. +func (s *noticesSuite) TestSanitizeNoticeTypesFilterDuplicateGivenTypes(c *C) { + typeStrs := []string{ + string(state.ChangeUpdateNotice), + fmt.Sprintf( + "%s,%s,%s", + state.WarningNotice, + state.ChangeUpdateNotice, + state.RefreshInhibitNotice, + ), + string(state.WarningNotice), + string(state.RefreshInhibitNotice), + string(state.WarningNotice), + string(state.ChangeUpdateNotice), + } + types := []state.NoticeType{ + state.ChangeUpdateNotice, + state.WarningNotice, + state.RefreshInhibitNotice, + } + // Request unnecessary since explicitly-specified types are validated later. + result, err := daemon.SanitizeNoticeTypesFilter(typeStrs, nil) + c.Assert(err, IsNil) + c.Check(result, DeepEquals, types) +} + +// Check that notice types granted by default by multiple connected interfaces +// are only included once in the filter. 
+func (s *noticesSuite) TestSanitizeNoticeTypesFilterDuplicateDefaultTypes(c *C) { + types := []state.NoticeType{ + state.NoticeType("foo"), + state.NoticeType("bar"), + state.NoticeType("baz"), + } + ifaces := []string{ + "abc", + "xyz", + "123", + } + fakeNoticeReadInterfaces := map[state.NoticeType][]string{ + types[0]: {ifaces[0], ifaces[1]}, + types[1]: {ifaces[1], ifaces[2]}, + types[2]: {ifaces[2]}, + } + restore := daemon.MockNoticeReadInterfaces(fakeNoticeReadInterfaces) + defer restore() + + // Check that multiple interfaces which grant the same notice type do not + // result in duplicates of that type + req, err := http.NewRequest("GET", "/v2/notices", nil) + c.Assert(err, IsNil) + req.RemoteAddr = fmt.Sprintf("pid=100;uid=1000;socket=%s;iface=%s&%s;", dirs.SnapSocket, ifaces[0], ifaces[1]) + result, err := daemon.SanitizeNoticeTypesFilter(nil, req) + c.Assert(err, IsNil) + c.Check(result, DeepEquals, types[:2]) +} + +// Check that requests for notice types granted by multiple connected interfaces +// behave correctly. +func (s *noticesSuite) TestNoticeTypesViewableBySnap(c *C) { + types := []state.NoticeType{ + state.NoticeType("foo"), + state.NoticeType("bar"), + state.NoticeType("baz"), + } + ifaces := []string{ + "abc", + "xyz", + "123", + } + fakeNoticeReadInterfaces := map[state.NoticeType][]string{ + types[0]: {ifaces[0], ifaces[1]}, + types[1]: {ifaces[1], ifaces[2]}, + types[2]: {ifaces[2]}, + } + restore := daemon.MockNoticeReadInterfaces(fakeNoticeReadInterfaces) + defer restore() + + // Check notice types granted by different connected interfaces. + req, err := http.NewRequest("GET", "/v2/notices", nil) + c.Assert(err, IsNil) + req.RemoteAddr = fmt.Sprintf("pid=100;uid=1000;socket=%s;iface=%s&%s;", dirs.SnapSocket, ifaces[0], ifaces[2]) + requestedTypes := []state.NoticeType{types[0], types[1], types[2]} + viewable := daemon.NoticeTypesViewableBySnap(requestedTypes, req) + c.Check(viewable, Equals, true) + + // Check notice types granted by the same connected interface. 
+ req, err = http.NewRequest("GET", "/v2/notices", nil) + c.Assert(err, IsNil) + req.RemoteAddr = fmt.Sprintf("pid=100;uid=1000;socket=%s;iface=%s&%s;", dirs.SnapSocket, ifaces[0], ifaces[1]) + // Types viewable by both interfaces + requestedTypes = []state.NoticeType{types[0]} + viewable = daemon.NoticeTypesViewableBySnap(requestedTypes, req) + c.Check(viewable, Equals, true) + // Types viewable by at least one interface + requestedTypes = []state.NoticeType{types[0], types[1]} + viewable = daemon.NoticeTypesViewableBySnap(requestedTypes, req) + c.Check(viewable, Equals, true) + // Type not viewable by any interface + requestedTypes = []state.NoticeType{types[2]} + viewable = daemon.NoticeTypesViewableBySnap(requestedTypes, req) + c.Check(viewable, Equals, false) + // Mix of viewable and unviewable types + requestedTypes = []state.NoticeType{types[0], types[2]} + viewable = daemon.NoticeTypesViewableBySnap(requestedTypes, req) + c.Check(viewable, Equals, false) + + // Check no types results in not viewable, no matter what + requestedTypes = make([]state.NoticeType, 0) + req, err = http.NewRequest("GET", "/v2/notices", nil) + c.Assert(err, IsNil) + // No "iface=" field given + req.RemoteAddr = fmt.Sprintf("pid=100;uid=1000;socket=%s;", dirs.SnapSocket) + viewable = daemon.NoticeTypesViewableBySnap(requestedTypes, req) + c.Check(viewable, Equals, false) + // Empty "iface=" field + req.RemoteAddr = fmt.Sprintf("pid=100;uid=1000;socket=%s;iface=;", dirs.SnapSocket) + viewable = daemon.NoticeTypesViewableBySnap(requestedTypes, req) + c.Check(viewable, Equals, false) + // Non-empty "iface=" field + req.RemoteAddr = fmt.Sprintf("pid=100;uid=1000;socket=%s;iface=snap-refresh-observe;", dirs.SnapSocket) + viewable = daemon.NoticeTypesViewableBySnap(requestedTypes, req) + c.Check(viewable, Equals, false) +} + +func (s *noticesSuite) TestAddNotice(c *C) { + s.daemon(c) + + // mock request coming from snap command + restore := daemon.MockOsReadlink(func(path string) (string, error) { + c.Check(path, Equals, "/proc/100/exe") + return filepath.Join(dirs.GlobalRootDir, "/usr/bin/snap"), nil + }) + defer restore() + + st := s.d.Overlord().State() + st.Lock() + // mock existing snap + snapstate.Set(st, "snap-name", &snapstate.SnapState{ + Active: true, + Sequence: snapstatetest.NewSequenceFromSnapSideInfos([]*snap.SideInfo{{RealName: "snap-name", Revision: snap.R(2)}}), + }) + st.Unlock() + + start := time.Now() + body := []byte(`{ + "action": "add", + "type": "snap-run-inhibit", + "key": "snap-name" + }`) + req, err := http.NewRequest("POST", "/v2/notices", bytes.NewReader(body)) + c.Assert(err, IsNil) + req.RemoteAddr = "pid=100;uid=1000;socket=;" + rsp := s.syncReq(c, req, nil) + c.Assert(rsp.Status, Equals, 200) + + resultBytes, err := json.Marshal(rsp.Result) + c.Assert(err, IsNil) + + st.Lock() + notices := st.Notices(nil) + st.Unlock() + c.Assert(notices, HasLen, 1) + n := noticeToMap(c, notices[0]) + noticeID, ok := n["id"].(string) + c.Assert(ok, Equals, true) + c.Assert(string(resultBytes), Equals, `{"id":"`+noticeID+`"}`) + + firstOccurred, err := time.Parse(time.RFC3339, n["first-occurred"].(string)) + c.Assert(err, IsNil) + c.Assert(firstOccurred.After(start), Equals, true) + lastOccurred, err := time.Parse(time.RFC3339, n["last-occurred"].(string)) + c.Assert(err, IsNil) + c.Assert(lastOccurred.Equal(firstOccurred), Equals, true) + lastRepeated, err := time.Parse(time.RFC3339, n["last-repeated"].(string)) + c.Assert(err, IsNil) + c.Assert(lastRepeated.Equal(firstOccurred), Equals, true) + + 
delete(n, "first-occurred") + delete(n, "last-occurred") + delete(n, "last-repeated") + c.Assert(n, DeepEquals, map[string]any{ + "id": noticeID, + "user-id": 1000.0, + "type": "snap-run-inhibit", + "key": "snap-name", + "occurrences": 1.0, + "expire-after": "168h0m0s", + }) +} + +func (s *noticesSuite) TestAddNoticeInvalidRequestUid(c *C) { + s.daemon(c) + + body := []byte(`{ + "action": "add", + "type": "snap-run-inhibit", + "key": "snap-name" + }`) + req, err := http.NewRequest("POST", "/v2/notices", bytes.NewReader(body)) + c.Assert(err, IsNil) + req.RemoteAddr = "pid=100;uid=;socket=;" + rsp := s.errorReq(c, req, nil) + c.Assert(rsp.Status, Equals, 403) +} + +func (s *noticesSuite) TestAddNoticeInvalidAction(c *C) { + s.testAddNoticeBadRequest(c, `{"action": "bad"}`, "invalid action.*") +} + +func (s *noticesSuite) TestAddNoticeInvalidTypeUnkown(c *C) { + s.testAddNoticeBadRequest(c, `{"action": "add", "type": "foo"}`, `cannot add notice with invalid type "foo"`) +} + +func (s *noticesSuite) TestAddNoticeInvalidTypeKnown(c *C) { + s.testAddNoticeBadRequest(c, `{"action": "add", "type": "change-update", "key": "test"}`, "cannot add notice with invalid type.*") +} + +func (s *noticesSuite) TestAddNoticeEmptyKey(c *C) { + s.testAddNoticeBadRequest(c, `{"action": "add", "type": "snap-run-inhibit", "key": ""}`, `cannot add snap-run-inhibit notice with invalid key ""`) +} + +func (s *noticesSuite) TestAddNoticeKeyTooLong(c *C) { + request, err := json.Marshal(map[string]any{ + "action": "add", + "type": "snap-run-inhibit", + "key": strings.Repeat("x", 257), + }) + c.Assert(err, IsNil) + s.testAddNoticeBadRequest(c, string(request), "cannot add snap-run-inhibit notice with invalid key: key must be 256 bytes or less") +} + +func (s *noticesSuite) TestAddNoticeInvalidSnapName(c *C) { + s.testAddNoticeBadRequest(c, `{"action": "add", "type": "snap-run-inhibit", "key": "Snap-Name"}`, `invalid key: invalid snap name: "Snap-Name"`) +} + +func (s *noticesSuite) testAddNoticeBadRequest(c *C, body, errorMatch string) { + s.daemon(c) + + // mock request coming from snap command + restore := daemon.MockOsReadlink(func(path string) (string, error) { + c.Check(path, Equals, "/proc/100/exe") + return filepath.Join(dirs.GlobalRootDir, "/usr/bin/snap"), nil + }) + defer restore() + + req, err := http.NewRequest("POST", "/v2/notices", strings.NewReader(body)) + c.Assert(err, IsNil) + req.RemoteAddr = "pid=100;uid=1000;socket=;" + rsp := s.errorReq(c, req, nil) + c.Check(rsp.Status, Equals, 400) + c.Assert(rsp.Message, Matches, errorMatch) +} + +func (s *noticesSuite) TestAddNoticesSnapCmdNoReexec(c *C) { + s.testAddNoticesSnapCmd(c, filepath.Join(dirs.GlobalRootDir, "/usr/bin/snap"), false) +} + +func (s *noticesSuite) TestAddNoticesSnapCmdReexecSnapd(c *C) { + s.testAddNoticesSnapCmd(c, filepath.Join(dirs.SnapMountDir, "snapd/11/usr/bin/snap"), false) +} + +func (s *noticesSuite) TestAddNoticesSnapCmdReexecCore(c *C) { + s.testAddNoticesSnapCmd(c, filepath.Join(dirs.SnapMountDir, "core/12/usr/bin/snap"), false) +} + +func (s *noticesSuite) TestAddNoticesSnapCmdUnknownBinary(c *C) { + s.testAddNoticesSnapCmd(c, filepath.Join(dirs.SnapMountDir, "bad-c0re/12/usr/bin/snap"), true) +} + +func (s *noticesSuite) testAddNoticesSnapCmd(c *C, exePath string, shouldFail bool) { + s.daemon(c) + + // mock request coming from snap command + restore := daemon.MockOsReadlink(func(path string) (string, error) { + c.Check(path, Equals, "/proc/100/exe") + return exePath, nil + }) + defer restore() + + st := 
s.d.Overlord().State() + st.Lock() + // mock existing snap + snapstate.Set(st, "snap-name", &snapstate.SnapState{ + Active: true, + Sequence: snapstatetest.NewSequenceFromSnapSideInfos([]*snap.SideInfo{{RealName: "snap-name", Revision: snap.R(2)}}), + }) + st.Unlock() + + body := []byte(`{ + "action": "add", + "type": "snap-run-inhibit", + "key": "snap-name" + }`) + req, err := http.NewRequest("POST", "/v2/notices", bytes.NewReader(body)) + c.Assert(err, IsNil) + req.RemoteAddr = "pid=100;uid=1000;socket=;" + + if shouldFail { + rsp := s.errorReq(c, req, nil) + c.Check(rsp.Status, Equals, 403) + c.Assert(rsp.Message, Matches, "only snap command can record notices") + } else { + rsp := s.syncReq(c, req, nil) + c.Assert(rsp.Status, Equals, 200) + } +} + func (s *noticesSuite) TestNotice(c *C) { s.daemon(c) diff -Nru snapd-2.62+23.10/daemon/api_sideload_n_try_test.go snapd-2.63+23.10/daemon/api_sideload_n_try_test.go --- snapd-2.62+23.10/daemon/api_sideload_n_try_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_sideload_n_try_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -25,7 +25,6 @@ "crypto/rand" "errors" "fmt" - "io/ioutil" "net/http" "os" "path/filepath" @@ -558,7 +557,7 @@ version: 1`, nil) digest, size, err := asserts.SnapFileSHA3_384(fooSnap) c.Assert(err, check.IsNil) - fooSnapBytes, err := ioutil.ReadFile(fooSnap) + fooSnapBytes, err := os.ReadFile(fooSnap) c.Assert(err, check.IsNil) dev1Acct := assertstest.NewAccount(s.StoreSigning, "devel1", nil, "") @@ -807,7 +806,7 @@ chgSummary, _ := s.sideloadCheck(c, sideLoadBodyWithoutDevMode, head, "local", snapstate.Flags{RemoveSnapPath: true, Transaction: client.TransactionPerSnap}) c.Check(chgSummary, check.Equals, `Install "local" snap from file "a/b/local.snap"`) - files, err := ioutil.ReadDir(tmpDir) + files, err := os.ReadDir(tmpDir) c.Assert(err, check.IsNil) c.Assert(files, check.HasLen, 0) @@ -1158,7 +1157,7 @@ // unasserted snap twoSnap := snaptest.MakeTestSnapWithFiles(c, `name: two version: 1`, nil) - twoSnapData, err := ioutil.ReadFile(twoSnap) + twoSnapData, err := os.ReadFile(twoSnap) c.Assert(err, check.IsNil) snapData = append(snapData, twoSnapData) @@ -1186,7 +1185,7 @@ version: 1`, snap), nil) digest, size, err := asserts.SnapFileSHA3_384(thisSnap) c.Assert(err, check.IsNil) - thisSnapData, err := ioutil.ReadFile(thisSnap) + thisSnapData, err := os.ReadFile(thisSnap) c.Assert(err, check.IsNil) snapData = append(snapData, thisSnapData) diff -Nru snapd-2.62+23.10/daemon/api_snaps.go snapd-2.63+23.10/daemon/api_snaps.go --- snapd-2.62+23.10/daemon/api_snaps.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_snaps.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2015-2022 Canonical Ltd + * Copyright (C) 2015-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -59,7 +59,7 @@ Path: "/v2/snaps", GET: getSnapsInfo, POST: postSnaps, - ReadAccess: interfaceOpenAccess{Interface: "snap-refresh-observe"}, + ReadAccess: interfaceOpenAccess{Interfaces: []string{"snap-refresh-observe"}}, WriteAccess: authenticatedAccess{Polkit: polkitActionManage}, } ) diff -Nru snapd-2.62+23.10/daemon/api_snaps_test.go snapd-2.63+23.10/daemon/api_snaps_test.go --- snapd-2.62+23.10/daemon/api_snaps_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_snaps_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 
+1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2014-2022 Canonical Ltd + * Copyright (C) 2014-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -69,7 +69,7 @@ } func (s *snapsSuite) expectSnapsReadAccess() { - s.expectReadAccess(daemon.InterfaceOpenAccess{Interface: "snap-refresh-observe"}) + s.expectReadAccess(daemon.InterfaceOpenAccess{Interfaces: []string{"snap-refresh-observe"}}) } func (s *snapsSuite) TestSnapsInfoIntegration(c *check.C) { @@ -1410,10 +1410,14 @@ switch snap["name"] { case "snap-a": refreshInhibit := snap["refresh-inhibit"].(map[string]interface{}) - c.Assert(refreshInhibit["proceed-time"], check.Equals, expectedProceedTimeA.Format(time.RFC3339Nano), testCmt) + proceedTime, err := time.Parse(time.RFC3339Nano, refreshInhibit["proceed-time"].(string)) + c.Assert(err, check.IsNil) + c.Assert(proceedTime.Equal(expectedProceedTimeA), check.Equals, true, testCmt) case "snap-b": refreshInhibit := snap["refresh-inhibit"].(map[string]interface{}) - c.Assert(refreshInhibit["proceed-time"], check.Equals, expectedProceedTimeB.Format(time.RFC3339Nano), testCmt) + proceedTime, err := time.Parse(time.RFC3339Nano, refreshInhibit["proceed-time"].(string)) + c.Assert(err, check.IsNil) + c.Assert(proceedTime.Equal(expectedProceedTimeB), check.Equals, true, testCmt) case "snap-c": _, ok := snap["refresh-inhibit"] c.Assert(ok, check.Equals, false) @@ -1463,19 +1467,23 @@ rsp := s.jsonReq(c, req, nil) snaps := snapList(rsp.Result) + c.Assert(snaps, check.HasLen, 2) for _, snap := range snaps { testCmt := check.Commentf("snap %s failed", snap["name"]) switch snap["name"] { case "snap-a": refreshInhibit := snap["refresh-inhibit"].(map[string]interface{}) - c.Assert(refreshInhibit["proceed-time"], check.Equals, expectedProceedTimeA.Format(time.RFC3339Nano), testCmt) + proceedTime, err := time.Parse(time.RFC3339Nano, refreshInhibit["proceed-time"].(string)) + c.Assert(err, check.IsNil) + c.Assert(proceedTime.Equal(expectedProceedTimeA), check.Equals, true, testCmt) case "snap-b": refreshInhibit := snap["refresh-inhibit"].(map[string]interface{}) - c.Assert(refreshInhibit["proceed-time"], check.Equals, expectedProceedTimeB.Format(time.RFC3339Nano), testCmt) + proceedTime, err := time.Parse(time.RFC3339Nano, refreshInhibit["proceed-time"].(string)) + c.Assert(err, check.IsNil) + c.Assert(proceedTime.Equal(expectedProceedTimeB), check.Equals, true, testCmt) case "snap-c": - _, ok := snap["refresh-inhibit"] - c.Assert(ok, check.Equals, false) + c.Error("snap-c should not be listed") } } } diff -Nru snapd-2.62+23.10/daemon/api_snapshots_test.go snapd-2.63+23.10/daemon/api_snapshots_test.go --- snapd-2.62+23.10/daemon/api_snapshots_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_snapshots_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -25,7 +25,6 @@ "errors" "fmt" "io" - "io/ioutil" "net/http" "strconv" "strings" @@ -449,7 +448,7 @@ var dataRead int defer daemon.MockSnapshotImport(func(ctx context.Context, st *state.State, r io.Reader) (uint64, []string, error) { - data, err := ioutil.ReadAll(r) + data, err := io.ReadAll(r) c.Assert(err, check.IsNil) dataRead = len(data) return uint64(0), nil, nil diff -Nru snapd-2.62+23.10/daemon/api_themes.go snapd-2.63+23.10/daemon/api_themes.go --- snapd-2.62+23.10/daemon/api_themes.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_themes.go 2024-04-24 00:00:39.000000000 +0000 @@ 
-1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2020 Canonical Ltd + * Copyright (C) 2020-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -45,8 +45,8 @@ Path: "/v2/accessories/themes", GET: checkThemes, POST: installThemes, - ReadAccess: interfaceOpenAccess{Interface: "snap-themes-control"}, - WriteAccess: interfaceAuthenticatedAccess{Interface: "snap-themes-control", Polkit: polkitActionManage}, + ReadAccess: interfaceOpenAccess{Interfaces: []string{"snap-themes-control"}}, + WriteAccess: interfaceAuthenticatedAccess{Interfaces: []string{"snap-themes-control"}, Polkit: polkitActionManage}, } ) diff -Nru snapd-2.62+23.10/daemon/api_themes_test.go snapd-2.63+23.10/daemon/api_themes_test.go --- snapd-2.62+23.10/daemon/api_themes_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/api_themes_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2020 Canonical Ltd + * Copyright (C) 2020-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -72,8 +72,8 @@ } func (s *themesSuite) expectThemesAccess() { - s.expectReadAccess(daemon.InterfaceOpenAccess{Interface: "snap-themes-control"}) - s.expectWriteAccess(daemon.InterfaceAuthenticatedAccess{Interface: "snap-themes-control", Polkit: "io.snapcraft.snapd.manage"}) + s.expectReadAccess(daemon.InterfaceOpenAccess{Interfaces: []string{"snap-themes-control"}}) + s.expectWriteAccess(daemon.InterfaceAuthenticatedAccess{Interfaces: []string{"snap-themes-control"}, Polkit: "io.snapcraft.snapd.manage"}) } func (s *themesSuite) TestInstalledThemes(c *C) { diff -Nru snapd-2.62+23.10/daemon/daemon_test.go snapd-2.63+23.10/daemon/daemon_test.go --- snapd-2.62+23.10/daemon/daemon_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/daemon_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/http/httptest" @@ -801,7 +801,7 @@ res, err := http.Get(fmt.Sprintf("http://%s/endp", snapdL.Addr())) c.Assert(err, check.IsNil) c.Check(res.StatusCode, check.Equals, 200) - body, err := ioutil.ReadAll(res.Body) + body, err := io.ReadAll(res.Body) res.Body.Close() c.Assert(err, check.IsNil) c.Check(string(body), check.Equals, "OKOK") @@ -1044,7 +1044,7 @@ // finally check that maintenance.json was written appropriate for this // restart reason - b, err := ioutil.ReadFile(dirs.SnapdMaintenanceFile) + b, err := os.ReadFile(dirs.SnapdMaintenanceFile) c.Assert(err, check.IsNil) maintErr := &errorResult{} diff -Nru snapd-2.62+23.10/daemon/export_access_test.go snapd-2.63+23.10/daemon/export_access_test.go --- snapd-2.62+23.10/daemon/export_access_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/export_access_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2021 Canonical Ltd + * Copyright (C) 2021-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -64,7 +64,7 @@ var RequireInterfaceApiAccessImpl = requireInterfaceApiAccessImpl -func MockRequireInterfaceApiAccess(new func(d *Daemon, r *http.Request, ucred *ucrednet, interfaceName string) 
*apiError) (restore func()) { +func MockRequireInterfaceApiAccess(new func(d *Daemon, r *http.Request, ucred *ucrednet, interfaceNames []string) *apiError) (restore func()) { old := requireInterfaceApiAccess requireInterfaceApiAccess = new return func() { diff -Nru snapd-2.62+23.10/daemon/export_api_notices_test.go snapd-2.63+23.10/daemon/export_api_notices_test.go --- snapd-2.62+23.10/daemon/export_api_notices_test.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/daemon/export_api_notices_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,37 @@ +// -*- Mode: Go; indent-tabs-mode: t -*- + +/* + * Copyright (C) 2024 Canonical Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package daemon + +import ( + "github.com/snapcore/snapd/overlord/state" +) + +var ( + SanitizeNoticeTypesFilter = sanitizeNoticeTypesFilter + NoticeTypesViewableBySnap = noticeTypesViewableBySnap +) + +func MockNoticeReadInterfaces(newMap map[state.NoticeType][]string) (restore func()) { + old := noticeReadInterfaces + noticeReadInterfaces = newMap + return func() { + noticeReadInterfaces = old + } +} diff -Nru snapd-2.62+23.10/daemon/export_test.go snapd-2.63+23.10/daemon/export_test.go --- snapd-2.62+23.10/daemon/export_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/export_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -29,6 +29,7 @@ "github.com/snapcore/snapd/asserts/snapasserts" "github.com/snapcore/snapd/boot" + "github.com/snapcore/snapd/client/clientutil" "github.com/snapcore/snapd/overlord" "github.com/snapcore/snapd/overlord/assertstate" "github.com/snapcore/snapd/overlord/restart" @@ -378,3 +379,17 @@ systemUserFromRequest = f return restore } + +func MockOsReadlink(f func(string) (string, error)) func() { + old := osReadlink + osReadlink = f + return func() { + osReadlink = old + } +} + +func MockNewStatusDecorator(f func(ctx context.Context, isGlobal bool, uid string) clientutil.StatusDecorator) (restore func()) { + restore = testutil.Backup(&newStatusDecorator) + newStatusDecorator = f + return restore +} diff -Nru snapd-2.62+23.10/daemon/ucrednet.go snapd-2.63+23.10/daemon/ucrednet.go --- snapd-2.62+23.10/daemon/ucrednet.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/ucrednet.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2015 Canonical Ltd + * Copyright (C) 2015-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -25,8 +25,11 @@ "net" "regexp" "strconv" + "strings" "sync" sys "syscall" + + "github.com/snapcore/snapd/strutil" ) var errNoID = errors.New("no pid/uid found") @@ -39,14 +42,14 @@ var raddrRegexp = regexp.MustCompile(`^pid=(\d+);uid=(\d+);socket=([^;]*);(iface=([^;]*);)?$`) var ucrednetGet = ucrednetGetImpl -var ucrednetGetWithInterface = ucrednetGetWithInterfaceImpl +var ucrednetGetWithInterfaces = ucrednetGetWithInterfacesImpl func 
ucrednetGetImpl(remoteAddr string) (*ucrednet, error) { - uc, _, err := ucrednetGetWithInterface(remoteAddr) + uc, _, err := ucrednetGetWithInterfaces(remoteAddr) return uc, err } -func ucrednetGetWithInterfaceImpl(remoteAddr string) (ucred *ucrednet, iface string, err error) { +func ucrednetGetWithInterfacesImpl(remoteAddr string) (ucred *ucrednet, ifaces []string, err error) { // NOTE treat remoteAddr at one point included a user-controlled // string. In case that happens again by accident, treat it as tainted, // and be very suspicious of it. @@ -62,20 +65,42 @@ if v, err := strconv.ParseUint(subs[2], 10, 32); err == nil { u.Uid = uint32(v) } + // group: ([^;]*) - socket path following socket= u.Socket = subs[3] - if len(subs) == 6 { - iface = subs[5] + // group: (iface=([^;]*);) + if len(subs[4]) > 0 { + // group: ([^;]*) - actual interfaces joined together with & separator + ifaces = strings.Split(subs[5], "&") } } if u.Pid == ucrednetNoProcess || u.Uid == ucrednetNobody { - return nil, "", errNoID + return nil, nil, errNoID } - return u, iface, nil + return u, ifaces, nil } func ucrednetAttachInterface(remoteAddr, iface string) string { - return fmt.Sprintf("%siface=%s;", remoteAddr, iface) + inds := raddrRegexp.FindStringSubmatchIndex(remoteAddr) + if inds == nil { + // This should only occur if remoteAddr is invalid. + return fmt.Sprintf("%siface=%s;", remoteAddr, iface) + } + // start of string matching group "(iface=([^;]*);)" + ifaceSubStart := inds[8] + ifaceSubEnd := inds[9] + if ifaceSubStart == ifaceSubEnd { + // "(iface=([^;]*);)" not present. + return fmt.Sprintf("%siface=%s;", remoteAddr, iface) + } + // string matching group "([^;]*)" within "(iface=([^;]*);)" + ifacesStr := remoteAddr[inds[10]:inds[11]] + ifaces := strings.Split(ifacesStr, "&") + if strutil.ListContains(ifaces, iface) { + return remoteAddr + } + ifaces = append(ifaces, iface) + return fmt.Sprintf("%siface=%s;", remoteAddr[:ifaceSubStart], strings.Join(ifaces, "&")) } type ucrednet struct { diff -Nru snapd-2.62+23.10/daemon/ucrednet_test.go snapd-2.63+23.10/daemon/ucrednet_test.go --- snapd-2.62+23.10/daemon/ucrednet_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/daemon/ucrednet_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2015 Canonical Ltd + * Copyright (C) 2015-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -200,30 +200,70 @@ } func (s *ucrednetSuite) TestGetWithInterface(c *check.C) { - u, iface, err := ucrednetGetWithInterface("pid=100;uid=42;socket=/run/snap.socket;iface=snap-refresh-observe;") + u, ifaces, err := ucrednetGetWithInterfaces("pid=100;uid=42;socket=/run/snap.socket;iface=snap-refresh-observe;") c.Assert(err, check.IsNil) c.Check(u.Pid, check.Equals, int32(100)) c.Check(u.Uid, check.Equals, uint32(42)) c.Check(u.Socket, check.Equals, "/run/snap.socket") - c.Check(iface, check.Equals, "snap-refresh-observe") + c.Check(ifaces, check.DeepEquals, []string{"snap-refresh-observe"}) // iface is optional - u, iface, err = ucrednetGetWithInterface("pid=100;uid=42;socket=/run/snap.socket;") + u, ifaces, err = ucrednetGetWithInterfaces("pid=100;uid=42;socket=/run/snap.socket;") c.Assert(err, check.IsNil) c.Check(u.Pid, check.Equals, int32(100)) c.Check(u.Uid, check.Equals, uint32(42)) c.Check(u.Socket, check.Equals, "/run/snap.socket") - c.Check(iface, check.Equals, "") + c.Check(ifaces, 
check.IsNil) } func (s *ucrednetSuite) TestAttachInterface(c *check.C) { remoteAddr := ucrednetAttachInterface("pid=100;uid=42;socket=/run/snap.socket;", "snap-refresh-observe") c.Check(remoteAddr, check.Equals, "pid=100;uid=42;socket=/run/snap.socket;iface=snap-refresh-observe;") - u, iface, err := ucrednetGetWithInterface(remoteAddr) + u, ifaces, err := ucrednetGetWithInterfaces(remoteAddr) c.Assert(err, check.IsNil) c.Check(u.Pid, check.Equals, int32(100)) c.Check(u.Uid, check.Equals, uint32(42)) c.Check(u.Socket, check.Equals, "/run/snap.socket") - c.Check(iface, check.Equals, "snap-refresh-observe") + c.Check(ifaces, check.DeepEquals, []string{"snap-refresh-observe"}) +} + +func (s *ucrednetSuite) TestAttachInterfaceRepeatedly(c *check.C) { + remoteAddr := "pid=100;uid=42;socket=/run/snap.socket;" + for i := 0; i < 2; i++ { + remoteAddr = ucrednetAttachInterface(remoteAddr, "snap-refresh-observe") + c.Check(remoteAddr, check.Equals, "pid=100;uid=42;socket=/run/snap.socket;iface=snap-refresh-observe;") + + u, ifaces, err := ucrednetGetWithInterfaces(remoteAddr) + c.Assert(err, check.IsNil) + c.Check(u.Pid, check.Equals, int32(100)) + c.Check(u.Uid, check.Equals, uint32(42)) + c.Check(u.Socket, check.Equals, "/run/snap.socket") + c.Check(ifaces, check.DeepEquals, []string{"snap-refresh-observe"}) + } +} + +func (s *ucrednetSuite) TestAttachInterfaceMultiple(c *check.C) { + remoteAddr := ucrednetAttachInterface("pid=100;uid=42;socket=/run/snap.socket;", "snap-refresh-observe") + c.Check(remoteAddr, check.Equals, "pid=100;uid=42;socket=/run/snap.socket;iface=snap-refresh-observe;") + + remoteAddr = ucrednetAttachInterface(remoteAddr, "snap-prompting-control") + c.Check(remoteAddr, check.Equals, "pid=100;uid=42;socket=/run/snap.socket;iface=snap-refresh-observe&snap-prompting-control;") + + remoteAddr = ucrednetAttachInterface(remoteAddr, "snap-refresh-observe") + c.Check(remoteAddr, check.Equals, "pid=100;uid=42;socket=/run/snap.socket;iface=snap-refresh-observe&snap-prompting-control;") + + remoteAddr = ucrednetAttachInterface(remoteAddr, "foo") + c.Check(remoteAddr, check.Equals, "pid=100;uid=42;socket=/run/snap.socket;iface=snap-refresh-observe&snap-prompting-control&foo;") + + u, ifaces, err := ucrednetGetWithInterfaces(remoteAddr) + c.Assert(err, check.IsNil) + c.Check(u.Pid, check.Equals, int32(100)) + c.Check(u.Uid, check.Equals, uint32(42)) + c.Check(u.Socket, check.Equals, "/run/snap.socket") + c.Check(ifaces, check.DeepEquals, []string{ + "snap-refresh-observe", + "snap-prompting-control", + "foo", + }) } diff -Nru snapd-2.62+23.10/data/selinux/snappy.te snapd-2.63+23.10/data/selinux/snappy.te --- snapd-2.62+23.10/data/selinux/snappy.te 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/data/selinux/snappy.te 2024-04-24 00:00:39.000000000 +0000 @@ -761,6 +761,10 @@ seutil_read_file_contexts(snappy_confine_t) term_mount_pty_fs(snappy_confine_t) +# check if /run/systemd/container +# note, it's unlikely we're ever going to need to read that file on Fedora +init_search_pid_dirs(snappy_confine_t) + # device group fs_manage_cgroup_dirs(snappy_confine_t) fs_manage_cgroup_files(snappy_confine_t) diff -Nru snapd-2.62+23.10/data/systemd/snapd.service.in snapd-2.63+23.10/data/systemd/snapd.service.in --- snapd-2.62+23.10/data/systemd/snapd.service.in 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/data/systemd/snapd.service.in 2024-04-24 00:00:39.000000000 +0000 @@ -20,6 +20,7 @@ Restart=always WatchdogSec=5m Type=notify +NotifyAccess=all SuccessExitStatus=42 
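
The NotifyAccess=all line added to snapd.service.in above corresponds to the changelog entry below about letting all of the snapd service's control-group processes send systemd notifications. As a rough illustration of the underlying sd_notify(3) protocol, and only that, here is a hedged Go sketch using the standard library; it is not snapd's own systemd notification helper. A process in the unit's cgroup writes a datagram such as WATCHDOG=1 to the socket named by $NOTIFY_SOCKET; with the default NotifyAccess=main, systemd ignores such messages unless they come from the unit's main PID.

package main

import (
	"fmt"
	"net"
	"os"
)

// sdNotify sends a single sd_notify(3) state string (e.g. "READY=1" or
// "WATCHDOG=1") to the socket systemd advertises in $NOTIFY_SOCKET.
func sdNotify(state string) error {
	path := os.Getenv("NOTIFY_SOCKET")
	if path == "" {
		return fmt.Errorf("not started by systemd: NOTIFY_SOCKET is unset")
	}
	if path[0] == '@' {
		// abstract socket namespace: leading '@' maps to a NUL byte
		path = "\x00" + path[1:]
	}
	conn, err := net.DialUnix("unixgram", nil, &net.UnixAddr{Name: path, Net: "unixgram"})
	if err != nil {
		return err
	}
	defer conn.Close()
	_, err = conn.Write([]byte(state))
	return err
}

func main() {
	if err := sdNotify("WATCHDOG=1"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
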
RestartPreventExitStatus=42 KillMode=process diff -Nru snapd-2.62+23.10/debian/changelog snapd-2.63+23.10/debian/changelog --- snapd-2.62+23.10/debian/changelog 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/debian/changelog 2024-04-24 00:00:39.000000000 +0000 @@ -1,4 +1,55 @@ -snapd (2.62+23.10) mantic; urgency=medium +snapd (2.63+23.10) mantic; urgency=medium + + * New upstream release, LP: #2061179 + - Support for snap services to show the current status of user + services (experimental) + - Refresh app awareness: record snap-run-inhibit notice when + starting app from snap that is busy with refresh (experimental) + - Refresh app awareness: use warnings as fallback for desktop + notifications (experimental) + - Aspect based configuration: make request fields in the aspect- + bundle's rules optional (experimental) + - Aspect based configuration: make map keys conform to the same + format as path sub-keys (experimental) + - Aspect based configuration: make unset and set behaviour similar + to configuration options (experimental) + - Aspect based configuration: limit nesting level for setting value + (experimental) + - Components: use symlinks to point active snap component revisions + - Components: add model assertion support for components + - Components: fix to ensure local component installation always gets + a new revision number + - Add basic support for a CIFS remote filesystem-based home + directory + - Add support for AppArmor profile kill mode to avoid snap-confine + error + - Allow more than one interface to grant access to the same API + endpoint or notice type + - Allow all snapd service's control group processes to send systemd + notifications to prevent warnings flooding the log + - Enable not preseeded single boot install + - Update secboot to handle new sbatlevel + - Fix to not use cgroup for non-strict confined snaps (devmode, + classic) + - Fix two race conditions relating to freedesktop notifications + - Fix missing tunables in snap-update-ns AppArmor template + - Fix rejection of snapd snap udev command line by older host snap- + device-helper + - Rework seccomp allow/deny list + - Clean up files removed by gadgets + - Remove non-viable boot chains to avoid secboot failure + - posix_mq interface: add support for missing time64 mqueue syscalls + mq_timedreceive_time64 and mq_timedsend_time64 + - password-manager-service interface: allow kwalletd version 6 + - kubernetes-support interface: allow SOCK_SEQPACKET sockets + - system-observe interface: allow listing systemd units and their + properties + - opengl interface: enable use of nvidia container toolkit CDI + config generation + + -- Ernest Lotter Wed, 24 Apr 2024 02:00:39 +0200 + +snapd (2.62) xenial; urgency=medium * New upstream release, LP: #2058277 - Aspects based configuration schema support (experimental) diff -Nru snapd-2.62+23.10/desktop/notification/notificationtest/fdo.go snapd-2.63+23.10/desktop/notification/notificationtest/fdo.go --- snapd-2.62+23.10/desktop/notification/notificationtest/fdo.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/desktop/notification/notificationtest/fdo.go 2024-04-24 00:00:39.000000000 +0000 @@ -92,6 +92,9 @@ // If not nil, all the fdoApi methods will return the provided error // in place of performing their usual task. 
func (server *FdoServer) SetError(err *dbus.Error) { + server.mu.Lock() + defer server.mu.Unlock() + server.err = err } @@ -136,6 +139,9 @@ } func (a fdoApi) GetCapabilities() ([]string, *dbus.Error) { + a.server.mu.Lock() + defer a.server.mu.Unlock() + if a.server.err != nil { return nil, a.server.err } @@ -144,13 +150,13 @@ } func (a fdoApi) Notify(appName string, replacesID uint32, icon, summary, body string, actions []string, hints map[string]dbus.Variant, expires int32) (uint32, *dbus.Error) { + a.server.mu.Lock() + defer a.server.mu.Unlock() + if a.server.err != nil { return 0, a.server.err } - a.server.mu.Lock() - defer a.server.mu.Unlock() - a.server.lastID += 1 notification := &FdoNotification{ ID: a.server.lastID, @@ -171,8 +177,13 @@ } func (a fdoApi) CloseNotification(id uint32) *dbus.Error { - if a.server.err != nil { + dErr := func() *dbus.Error { + a.server.mu.Lock() + defer a.server.mu.Unlock() return a.server.err + }() + if dErr != nil { + return dErr } // close reason 3 is "closed by a call to CloseNotification" @@ -187,6 +198,9 @@ } func (a fdoApi) GetServerInformation() (name, vendor, version, specVersion string, err *dbus.Error) { + a.server.mu.Lock() + defer a.server.mu.Unlock() + if a.server.err != nil { return "", "", "", "", a.server.err } diff -Nru snapd-2.62+23.10/desktop/portal/document_test.go snapd-2.63+23.10/desktop/portal/document_test.go --- snapd-2.62+23.10/desktop/portal/document_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/desktop/portal/document_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,6 +24,7 @@ "os" "os/user" "path/filepath" + "sync" "github.com/godbus/dbus" . "gopkg.in/check.v1" @@ -46,7 +47,9 @@ getMountPointError *dbus.Error mountPointResponse string - calls []string + + m sync.Mutex + calls []string } var _ = Suite(&documentPortalSuite{}) @@ -87,9 +90,18 @@ os.RemoveAll(s.userRuntimePath) err := os.MkdirAll(s.userRuntimePath, 0777) c.Assert(err, IsNil) - s.getMountPointError = nil - s.mountPointResponse = "" - s.calls = nil + s.withLocked(func() { + s.getMountPointError = nil + s.mountPointResponse = "" + s.calls = nil + }) +} + +func (s *documentPortalSuite) withLocked(f func()) { + s.m.Lock() + defer s.m.Unlock() + + f() } func (s *documentPortalSuite) TestGetDefaultMountPointWithUserError(c *C) { @@ -114,27 +126,35 @@ } func (s *documentPortalSuite) TestGetMountPointResponseError(c *C) { - s.getMountPointError = dbus.MakeFailedError(errors.New("something went wrong")) + s.withLocked(func() { + s.getMountPointError = dbus.MakeFailedError(errors.New("something went wrong")) + }) document := &portal.Document{} mountPoint, err := document.GetMountPoint() c.Check(err, FitsTypeOf, dbus.Error{}) c.Check(err, ErrorMatches, `something went wrong`) c.Check(mountPoint, Equals, "") - c.Check(s.calls, DeepEquals, []string{ - "GetMountPoint", + s.withLocked(func() { + c.Check(s.calls, DeepEquals, []string{ + "GetMountPoint", + }) }) } func (s *documentPortalSuite) TestGetMountPointHappy(c *C) { - s.mountPointResponse = filepath.Join(s.userRuntimePath, "doc") + s.withLocked(func() { + s.mountPointResponse = filepath.Join(s.userRuntimePath, "doc") + }) document := &portal.Document{} mountPoint, err := document.GetMountPoint() c.Check(err, IsNil) c.Check(mountPoint, Equals, s.mountPointResponse) - c.Check(s.calls, DeepEquals, []string{ - "GetMountPoint", + s.withLocked(func() { + c.Check(s.calls, DeepEquals, []string{ + "GetMountPoint", + }) }) } @@ -143,6 +163,8 @@ } func (p *fakeDocumentPortal) GetMountPoint() ([]byte, 
*dbus.Error) { + p.m.Lock() + defer p.m.Unlock() p.calls = append(p.calls, "GetMountPoint") return []byte(p.mountPointResponse), p.getMountPointError diff -Nru snapd-2.62+23.10/gadget/device/encrypt.go snapd-2.63+23.10/gadget/device/encrypt.go --- snapd-2.62+23.10/gadget/device/encrypt.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/gadget/device/encrypt.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" @@ -45,11 +44,11 @@ // ReadEncryptionMarkers reads the encryption marker files at the appropriate // locations. func ReadEncryptionMarkers(dataFDEDir, saveFDEDir string) ([]byte, []byte, error) { - marker1, err := ioutil.ReadFile(encryptionMarkerUnder(dataFDEDir)) + marker1, err := os.ReadFile(encryptionMarkerUnder(dataFDEDir)) if err != nil { return nil, nil, err } - marker2, err := ioutil.ReadFile(encryptionMarkerUnder(saveFDEDir)) + marker2, err := os.ReadFile(encryptionMarkerUnder(saveFDEDir)) if err != nil { return nil, nil, err } @@ -132,7 +131,7 @@ // TODO:UC20: consider more than the marker for cases where we reseal // outside of run mode stamp := filepath.Join(dirs.SnapFDEDirUnder(rootdir), "sealed-keys") - content, err := ioutil.ReadFile(stamp) + content, err := os.ReadFile(stamp) if os.IsNotExist(err) { return sm, ErrNoSealedKeys } diff -Nru snapd-2.62+23.10/gadget/device/encrypt_test.go snapd-2.63+23.10/gadget/device/encrypt_test.go --- snapd-2.62+23.10/gadget/device/encrypt_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/gadget/device/encrypt_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,6 @@ package device_test import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -124,7 +123,7 @@ c.Assert(err, IsNil) c.Check(tc.mth, Equals, mth) - content, err := ioutil.ReadFile(filepath.Join(root, "/var/lib/snapd/device/fde/sealed-keys")) + content, err := os.ReadFile(filepath.Join(root, "/var/lib/snapd/device/fde/sealed-keys")) c.Assert(err, IsNil) c.Check(string(content), Equals, tc.expected) } diff -Nru snapd-2.62+23.10/gadget/gadget.go snapd-2.63+23.10/gadget/gadget.go --- snapd-2.62+23.10/gadget/gadget.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/gadget/gadget.go 2024-04-24 00:00:39.000000000 +0000 @@ -25,7 +25,6 @@ "encoding/json" "errors" "fmt" - "io/ioutil" "math" "os" "path/filepath" @@ -679,7 +678,7 @@ return nil, nil } - b, err := ioutil.ReadFile(filename) + b, err := os.ReadFile(filename) if err != nil { return nil, err } @@ -1209,7 +1208,7 @@ // validation like Validate. func ReadInfo(gadgetSnapRootDir string, model Model) (*Info, error) { gadgetYamlFn := filepath.Join(gadgetSnapRootDir, "meta", "gadget.yaml") - ginfo, err := readInfo(ioutil.ReadFile, gadgetYamlFn, model) + ginfo, err := readInfo(os.ReadFile, gadgetYamlFn, model) if err != nil { return nil, err } @@ -1909,7 +1908,7 @@ // but could be used on any known to be properly installed gadget. 
func HasRole(gadgetSnapRootDir string, roles []string) (foundRole string, err error) { gadgetYamlFn := filepath.Join(gadgetSnapRootDir, "meta", "gadget.yaml") - gadgetYaml, err := ioutil.ReadFile(gadgetYamlFn) + gadgetYaml, err := os.ReadFile(gadgetYamlFn) if err != nil { return "", err } diff -Nru snapd-2.62+23.10/gadget/install/content.go snapd-2.63+23.10/gadget/install/content.go --- snapd-2.62+23.10/gadget/install/content.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/gadget/install/content.go 2024-04-24 00:00:39.000000000 +0000 @@ -100,7 +100,7 @@ err = fmt.Errorf("cannot unmount %v after writing filesystem content: %v", fsDevice, errUnmount) } }() - fs, err := gadget.NewMountedFilesystemWriter(laidOut, observer) + fs, err := gadget.NewMountedFilesystemWriter(nil, laidOut, observer) if err != nil { return fmt.Errorf("cannot create filesystem image writer: %v", err) } diff -Nru snapd-2.62+23.10/gadget/install/content_test.go snapd-2.63+23.10/gadget/install/content_test.go --- snapd-2.62+23.10/gadget/install/content_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/gadget/install/content_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "errors" "fmt" - "io/ioutil" + "os" "path/filepath" "syscall" @@ -220,7 +220,7 @@ if err == nil { // the target file system is mounted on a directory named after the structure index - content, err := ioutil.ReadFile(filepath.Join(dirs.SnapRunDir, "gadget-install/dev-node2", "EFI/boot/grubx64.efi")) + content, err := os.ReadFile(filepath.Join(dirs.SnapRunDir, "gadget-install/dev-node2", "EFI/boot/grubx64.efi")) c.Assert(err, IsNil) c.Check(string(content), Equals, "grubx64.efi content") c.Assert(obs.content, DeepEquals, map[string][]*mockContentChange{ diff -Nru snapd-2.62+23.10/gadget/install/install_test.go snapd-2.63+23.10/gadget/install/install_test.go --- snapd-2.62+23.10/gadget/install/install_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/gadget/install/install_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "fmt" - "io/ioutil" "os" "path/filepath" "runtime" @@ -1038,10 +1037,10 @@ var data []byte for _, mntPt := range []string{espMntPt, bootMntPt} { - data, err = ioutil.ReadFile(filepath.Join(mntPt, "EFI/boot/bootx64.efi")) + data, err = os.ReadFile(filepath.Join(mntPt, "EFI/boot/bootx64.efi")) c.Check(err, IsNil) c.Check(string(data), Equals, "shim.efi.signed content") - data, err = ioutil.ReadFile(filepath.Join(mntPt, "EFI/boot/grubx64.efi")) + data, err = os.ReadFile(filepath.Join(mntPt, "EFI/boot/grubx64.efi")) c.Check(err, IsNil) c.Check(string(data), Equals, "grubx64.efi content") } diff -Nru snapd-2.62+23.10/gadget/mountedfilesystem.go snapd-2.63+23.10/gadget/mountedfilesystem.go --- snapd-2.62+23.10/gadget/mountedfilesystem.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/gadget/mountedfilesystem.go 2024-04-24 00:00:39.000000000 +0000 @@ -25,7 +25,7 @@ _ "crypto/sha1" "fmt" "io" - "io/ioutil" + "io/fs" "os" "path/filepath" "sort" @@ -77,13 +77,14 @@ // MountedFilesystemWriter assists in writing contents of a structure to a // mounted filesystem. type MountedFilesystemWriter struct { + fromPs *LaidOutStructure ps *LaidOutStructure observer ContentObserver } // NewMountedFilesystemWriter returns a writer capable of writing provided // structure, with content of the structure stored in the given root directory. 
-func NewMountedFilesystemWriter(ps *LaidOutStructure, observer ContentObserver) (*MountedFilesystemWriter, error) { +func NewMountedFilesystemWriter(fromPs, ps *LaidOutStructure, observer ContentObserver) (*MountedFilesystemWriter, error) { if ps == nil { return nil, fmt.Errorf("internal error: *LaidOutStructure is nil") } @@ -91,6 +92,7 @@ return nil, fmt.Errorf("structure %v has no filesystem", ps) } fw := &MountedFilesystemWriter{ + fromPs: fromPs, ps: ps, observer: observer, } @@ -153,7 +155,7 @@ dst = filepath.Join(dst, filepath.Base(src)) } - fis, err := ioutil.ReadDir(src) + fis, err := os.ReadDir(src) if err != nil { return fmt.Errorf("cannot list directory entries: %v", err) } @@ -306,9 +308,9 @@ // structure, with structure content coming from provided root directory. The // mount is located by calling a mount lookup helper. The backup directory // contains backup state information for use during rollback. -func newMountedFilesystemUpdater(ps *LaidOutStructure, backupDir string, mountLookup mountLookupFunc, observer ContentObserver) (*mountedFilesystemUpdater, error) { +func newMountedFilesystemUpdater(fromPs, ps *LaidOutStructure, backupDir string, mountLookup mountLookupFunc, observer ContentObserver) (*mountedFilesystemUpdater, error) { // avoid passing observer, writes will not be observed - fw, err := NewMountedFilesystemWriter(ps, nil) + fw, err := NewMountedFilesystemWriter(fromPs, ps, nil) if err != nil { return nil, err } @@ -354,6 +356,24 @@ return dstPath, backupPath } +func getDestinationPath(content VolumeContent) (dst string) { + dst = content.Target + if strings.HasSuffix(dst, "/") { + dst = filepath.Join(dst, filepath.Base(content.UnresolvedSource)) + } + + return dst +} + +func (f *mountedFilesystemUpdater) getKnownContent() (knownContent map[string]bool) { + knownContent = make(map[string]bool) + for _, c := range f.ps.VolumeStructure.Content { + knownContent[getDestinationPath(c)] = true + } + + return knownContent +} + // Update applies an update to a mounted filesystem. The caller must have // executed a Backup() before, to prepare a data set for rollback purpose. func (f *mountedFilesystemUpdater) Update() error { @@ -365,6 +385,7 @@ backupRoot := fsStructBackupPath(f.backupDir, f.ps) skipped := 0 + for _, c := range f.ps.ResolvedContent { if err := f.updateVolumeContent(f.mountPoint, &c, preserveInDst, backupRoot); err != nil { if err == ErrNoUpdate { @@ -375,14 +396,48 @@ } } - if skipped == len(f.ps.ResolvedContent) { + knownContent := f.getKnownContent() + deleted := false + if f.fromPs != nil { + for _, c := range f.fromPs.VolumeStructure.Content { + if knownContent[getDestinationPath(c)] { + continue + } + destPath, backupPath := f.entryDestPaths(f.mountPoint, c.UnresolvedSource, c.Target, backupRoot) + preserveStamp := backupPath + ".preserve" + + // We skip directory because we do not know + // exactly the content that is supposed to be + // in there. + // XXX: it might be possible to recursively compare + // directories from mounted snaps to detect + // what files are removed. 
+ if osutil.IsDirectory(destPath) { + continue + } + + if strutil.SortedListContains(preserveInDst, destPath) || osutil.FileExists(preserveStamp) { + continue + } + + if err := os.Remove(destPath); err != nil { + if os.IsNotExist(err) { + continue + } + return fmt.Errorf("cannot remove content: %v", err) + } + deleted = true + } + } + + if !deleted && skipped == len(f.ps.ResolvedContent) { return ErrNoUpdate } return nil } -func (f *mountedFilesystemUpdater) sourceDirectoryEntries(srcPath string) ([]os.FileInfo, error) { +func (f *mountedFilesystemUpdater) sourceDirectoryEntries(srcPath string) ([]fs.DirEntry, error) { if err := checkSourceIsDir(srcPath); err != nil { return nil, err } @@ -392,7 +447,7 @@ return nil, fmt.Errorf("source is a symbolic link") } - return ioutil.ReadDir(srcPath) + return os.ReadDir(srcPath) } // targetInSourceDir resolves the actual target for given source directory name @@ -550,6 +605,35 @@ } } + knownContent := f.getKnownContent() + if f.fromPs != nil { + for _, c := range f.fromPs.VolumeStructure.Content { + if knownContent[getDestinationPath(c)] { + continue + } + + destPath, backupPath := f.entryDestPaths(f.mountPoint, c.UnresolvedSource, c.Target, backupRoot) + // We skip directory because we do not know + // exactly the content that is supposed to be + // in there. + // XXX: it might be possible to recursively compare + // directories from mounted snaps to detect + // what files are removed. + if osutil.IsDirectory(destPath) { + continue + } + backupName := backupPath + ".backup" + + if !osutil.FileExists(destPath) { + continue + } + + if err := writeFileOrSymlink(destPath, backupName, nil); err != nil { + return fmt.Errorf("cannot create backup file: %v", err) + } + } + } + return nil } @@ -801,6 +885,33 @@ return fmt.Errorf("cannot map preserve entries for mount location %q: %v", f.mountPoint, err) } + knownContent := f.getKnownContent() + if f.fromPs != nil { + for _, c := range f.fromPs.VolumeStructure.Content { + if knownContent[getDestinationPath(c)] { + continue + } + + destPath, backupPath := f.entryDestPaths(f.mountPoint, c.UnresolvedSource, c.Target, backupRoot) + + if osutil.IsDirectory(destPath) { + continue + } + + if err := os.Remove(destPath); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("cannot rollback %s: %v", destPath, err) + } + } + + backupName := backupPath + ".backup" + + if err := writeFileOrSymlink(backupName, destPath, nil); err != nil { + return fmt.Errorf("cannot rollback %s: %v", destPath, err) + } + } + } + for _, c := range f.ps.ResolvedContent { if err := f.rollbackVolumeContent(f.mountPoint, &c, preserveInDst, backupRoot); err != nil { return fmt.Errorf("cannot rollback content: %v", err) diff -Nru snapd-2.62+23.10/gadget/mountedfilesystem_test.go snapd-2.63+23.10/gadget/mountedfilesystem_test.go --- snapd-2.62+23.10/gadget/mountedfilesystem_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/gadget/mountedfilesystem_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -198,7 +198,7 @@ Filesystem: "ext4", }, } - rw, err := gadget.NewMountedFilesystemWriter(ps, nil) + rw, err := gadget.NewMountedFilesystemWriter(ps, ps, nil) c.Assert(err, IsNil) outDir := c.MkDir() @@ -224,7 +224,7 @@ Filesystem: "ext4", }, } - rw, err := gadget.NewMountedFilesystemWriter(ps, nil) + rw, err := gadget.NewMountedFilesystemWriter(ps, ps, nil) c.Assert(err, IsNil) outDir := c.MkDir() @@ -245,7 +245,7 @@ Filesystem: "ext4", }, } - rw, err := gadget.NewMountedFilesystemWriter(ps, nil) + rw, err := 
gadget.NewMountedFilesystemWriter(ps, ps, nil) c.Assert(err, IsNil) outDir := c.MkDir() @@ -357,7 +357,7 @@ c: c, expectedRole: ps.Role(), } - rw, err := gadget.NewMountedFilesystemWriter(ps, obs) + rw, err := gadget.NewMountedFilesystemWriter(ps, ps, obs) c.Assert(err, IsNil) c.Assert(rw, NotNil) @@ -428,7 +428,7 @@ outDir := c.MkDir() - rw, err := gadget.NewMountedFilesystemWriter(ps, nil) + rw, err := gadget.NewMountedFilesystemWriter(ps, ps, nil) c.Assert(err, IsNil) c.Assert(rw, NotNil) @@ -454,7 +454,7 @@ outDir := c.MkDir() - rw, err := gadget.NewMountedFilesystemWriter(ps, nil) + rw, err := gadget.NewMountedFilesystemWriter(ps, ps, nil) c.Assert(err, IsNil) c.Assert(rw, NotNil) @@ -489,7 +489,7 @@ err := os.Chmod(outDir, 0000) c.Assert(err, IsNil) - rw, err := gadget.NewMountedFilesystemWriter(ps, nil) + rw, err := gadget.NewMountedFilesystemWriter(ps, ps, nil) c.Assert(err, IsNil) c.Assert(rw, NotNil) @@ -524,7 +524,7 @@ outDir := c.MkDir() - rw, err := gadget.NewMountedFilesystemWriter(psOverwritesDirectoryWithFile, nil) + rw, err := gadget.NewMountedFilesystemWriter(psOverwritesDirectoryWithFile, psOverwritesDirectoryWithFile, nil) c.Assert(err, IsNil) c.Assert(rw, NotNil) @@ -559,7 +559,7 @@ outDir := c.MkDir() - rw, err := gadget.NewMountedFilesystemWriter(psOverwritesFile, nil) + rw, err := gadget.NewMountedFilesystemWriter(psOverwritesFile, psOverwritesFile, nil) c.Assert(err, IsNil) c.Assert(rw, NotNil) @@ -596,7 +596,7 @@ makeSizedFile(c, filepath.Join(outDir, "/foo-dir/foo/bar"), 0, nil) - rw, err := gadget.NewMountedFilesystemWriter(ps, nil) + rw, err := gadget.NewMountedFilesystemWriter(ps, ps, nil) c.Assert(err, IsNil) c.Assert(rw, NotNil) @@ -678,7 +678,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemWriter(ps, nil) + rw, err := gadget.NewMountedFilesystemWriter(ps, ps, nil) c.Assert(err, IsNil) c.Assert(rw, NotNil) @@ -730,7 +730,7 @@ "foo-new", }, } - rw, err := gadget.NewMountedFilesystemWriter(ps, obs) + rw, err := gadget.NewMountedFilesystemWriter(ps, ps, obs) c.Assert(err, IsNil) c.Assert(rw, NotNil) @@ -769,7 +769,7 @@ }, } - rw, err := gadget.NewMountedFilesystemWriter(ps, nil) + rw, err := gadget.NewMountedFilesystemWriter(ps, ps, nil) c.Assert(err, IsNil) c.Assert(rw, NotNil) @@ -801,7 +801,7 @@ outDir := c.MkDir() - rw, err := gadget.NewMountedFilesystemWriter(ps, nil) + rw, err := gadget.NewMountedFilesystemWriter(ps, ps, nil) c.Assert(err, IsNil) c.Assert(rw, NotNil) @@ -827,13 +827,13 @@ }, } - rw, err := gadget.NewMountedFilesystemWriter(ps, nil) + rw, err := gadget.NewMountedFilesystemWriter(ps, ps, nil) c.Assert(err, ErrorMatches, "structure #0 has no filesystem") c.Assert(rw, IsNil) } func (s *mountedfilesystemTestSuite) TestMountedWriterTrivialValidation(c *C) { - rw, err := gadget.NewMountedFilesystemWriter(nil, nil) + rw, err := gadget.NewMountedFilesystemWriter(nil, nil, nil) c.Assert(err, ErrorMatches, `internal error: \*LaidOutStructure.*`) c.Assert(rw, IsNil) @@ -852,7 +852,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err = gadget.NewMountedFilesystemWriter(ps, nil) + rw, err = gadget.NewMountedFilesystemWriter(ps, ps, nil) c.Assert(err, IsNil) err = rw.Write("", nil) @@ -892,7 +892,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemWriter(ps, nil) + rw, err := gadget.NewMountedFilesystemWriter(ps, ps, nil) c.Assert(err, IsNil) c.Assert(rw, NotNil) @@ -1028,7 +1028,7 @@ c: c, expectedRole: ps.Role(), } - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to 
*gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, muo) @@ -1127,7 +1127,7 @@ observeErr: errors.New("observe fail"), expectedRole: ps.Role(), } - rw, err := gadget.NewMountedFilesystemWriter(ps, obs) + rw, err := gadget.NewMountedFilesystemWriter(ps, ps, obs) c.Assert(err, IsNil) c.Assert(rw, NotNil) @@ -1185,7 +1185,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -1240,7 +1240,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -1279,7 +1279,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -1330,7 +1330,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -1376,7 +1376,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -1443,7 +1443,7 @@ } { err := os.MkdirAll(tc.backupDir, 0755) c.Assert(err, IsNil) - rw, err := gadget.NewMountedFilesystemUpdater(ps, tc.backupDir, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, tc.backupDir, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return tc.outDir, nil }, nil) @@ -1492,7 +1492,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -1539,7 +1539,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -1577,7 +1577,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { 
c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -1686,7 +1686,7 @@ c: c, expectedRole: ps.Role(), } - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, muo) @@ -1818,7 +1818,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -1874,7 +1874,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -1933,7 +1933,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -1991,7 +1991,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -2045,7 +2045,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -2082,7 +2082,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -2121,7 +2121,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -2174,7 +2174,7 @@ c: c, expectedRole: ps.Role(), } - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, muo) @@ -2242,7 +2242,7 @@ c: c, expectedRole: ps.Role(), } - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, muo) @@ -2296,7 +2296,7 @@ c: c, expectedRole: ps.Role(), } - 
rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, muo) @@ -2355,7 +2355,7 @@ c: c, expectedRole: ps.Role(), } - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, muo) @@ -2426,7 +2426,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -2490,7 +2490,7 @@ c: c, expectedRole: ps.Role(), } - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, muo) @@ -2574,7 +2574,7 @@ c: c, expectedRole: ps.Role(), } - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, muo) @@ -2756,7 +2756,7 @@ "preserved/same-content-for-observer", }, } - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, muo) @@ -2952,7 +2952,7 @@ return "", nil } - rw, err := gadget.NewMountedFilesystemUpdater(psNoFs, s.backup, lookupFail, nil) + rw, err := gadget.NewMountedFilesystemUpdater(psNoFs, psNoFs, s.backup, lookupFail, nil) c.Assert(err, ErrorMatches, "structure #0 has no filesystem") c.Assert(rw, IsNil) @@ -2966,15 +2966,15 @@ } s.mustResolveVolumeContent(c, ps) - rw, err = gadget.NewMountedFilesystemUpdater(ps, "", lookupFail, nil) + rw, err = gadget.NewMountedFilesystemUpdater(ps, ps, "", lookupFail, nil) c.Assert(err, ErrorMatches, `internal error: backup directory must not be unset`) c.Assert(rw, IsNil) - rw, err = gadget.NewMountedFilesystemUpdater(ps, s.backup, nil, nil) + rw, err = gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, nil, nil) c.Assert(err, ErrorMatches, `internal error: mount lookup helper must be provided`) c.Assert(rw, IsNil) - rw, err = gadget.NewMountedFilesystemUpdater(nil, s.backup, lookupFail, nil) + rw, err = gadget.NewMountedFilesystemUpdater(nil, nil, s.backup, lookupFail, nil) c.Assert(err, ErrorMatches, `internal error: \*LaidOutStructure.*`) c.Assert(rw, IsNil) @@ -3002,7 +3002,7 @@ testPs.ResolvedContent[0].ResolvedSource = tc.src testPs.ResolvedContent[0].Target = tc.dst - rw, err := gadget.NewMountedFilesystemUpdater(testPs, s.backup, lookupOk, nil) + rw, err := gadget.NewMountedFilesystemUpdater(testPs, testPs, s.backup, lookupOk, nil) c.Assert(err, IsNil) c.Assert(rw, NotNil) @@ -3037,7 +3037,7 @@ return "", errors.New("fail fail fail") } - rw, err := 
gadget.NewMountedFilesystemUpdater(ps, s.backup, lookupFail, nil) + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, lookupFail, nil) c.Assert(err, ErrorMatches, "cannot find mount location of structure #0: fail fail fail") c.Assert(rw, IsNil) } @@ -3069,7 +3069,7 @@ } s.mustResolveVolumeContent(c, ps) - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil }, nil) @@ -3126,7 +3126,7 @@ expectedRole: ps.Role(), preserveTargets: []string{"EFI/ubuntu/grub.cfg"}, } - rw, err := gadget.NewMountedFilesystemUpdater(ps, s.backup, + rw, err := gadget.NewMountedFilesystemUpdater(ps, ps, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, ps) return outDir, nil @@ -3211,7 +3211,7 @@ expectedRole: psForObserver.Role(), preserveTargets: []string{"foo"}, } - rw, err := gadget.NewMountedFilesystemUpdater(psForObserver, s.backup, + rw, err := gadget.NewMountedFilesystemUpdater(psForObserver, psForObserver, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, psForObserver) s.mustResolveVolumeContent(c, psForObserver) @@ -3270,7 +3270,7 @@ expectedRole: psForObserver.Role(), preserveTargets: []string{"foo"}, } - rw, err := gadget.NewMountedFilesystemUpdater(psForObserver, s.backup, + rw, err := gadget.NewMountedFilesystemUpdater(psForObserver, psForObserver, s.backup, func(to *gadget.LaidOutStructure) (string, error) { c.Check(to, DeepEquals, psForObserver) s.mustResolveVolumeContent(c, psForObserver) @@ -3319,3 +3319,228 @@ c.Assert(err, IsNil) c.Check(filepath.Join(outDir, "foo"), testutil.FileEquals, "foo from disk") } + +func (s *mountedfilesystemTestSuite) TestMountedUpdaterRemoveFiles(c *C) { + newData := []gadgetData{ + {name: "bar", target: "foo", content: "data"}, + {name: "bar", target: "some-dir/foo", content: "data"}, + {name: "bar", target: "some-dir/bar", content: "data"}, + } + oldData := append(newData, []gadgetData{ + {name: "bar", target: "some-dir/to-be-removed", content: "data"}, + // We do not support removal of directories + {name: "source-dir", target: "some-other-dir/foo", content: "data"}, + {name: "source-dir", target: "manually-removed", content: "data"}, + {name: "bar", target: "some-dir/preserved", content: "data"}, + {name: "remove-me", target: "some-dir/remove-me", content: "data"}, + }...) 
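// Editor's note: illustrative sketch, not part of the patch. The updater now
// receives both the old and the new laid-out structure, so files that the old
// gadget content referenced but the new one no longer does can be backed up by
// Backup(), removed by Update() (directories and preserved entries are
// skipped), and restored by Rollback(). A minimal usage sketch, with a
// placeholder mount point and backup directory:
//
//	lookup := func(to *gadget.LaidOutStructure) (string, error) {
//		return "/run/mnt/struct-0", nil // assumed mount point, illustration only
//	}
//	updater, err := gadget.NewMountedFilesystemUpdater(oldPs, newPs, backupDir, lookup, nil)
//	if err != nil {
//		return err
//	}
//	if err := updater.Backup(); err != nil {
//		return err
//	}
//	if err := updater.Update(); err != nil && err != gadget.ErrNoUpdate {
//		return err
//	}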
+ + makeGadgetData(c, s.dir, newData) + + outDir := filepath.Join(c.MkDir(), "out-dir") + makeExistingData(c, outDir, oldData) + c.Assert(os.Remove(filepath.Join(outDir, "manually-removed")), IsNil) + + psBefore := &gadget.LaidOutStructure{ + VolumeStructure: &gadget.VolumeStructure{ + Size: 2048, + Filesystem: "ext4", + Content: []gadget.VolumeContent{ + { + UnresolvedSource: "bar", + Target: "/foo", + }, { + UnresolvedSource: "bar", + Target: "/some-dir/foo", + }, { + UnresolvedSource: "bar", + Target: "/some-dir/", + }, { + UnresolvedSource: "remove-me", + Target: "/some-dir/", + }, { + UnresolvedSource: "bar", + Target: "/some-dir/to-be-removed", + }, { + UnresolvedSource: "source-dir", + Target: "/some-other-dir", + }, { + UnresolvedSource: "bar", + Target: "/some-dir/preserved", + }, + }, + Update: gadget.VolumeUpdate{ + Edition: 1, + }, + }, + } + psAfter := &gadget.LaidOutStructure{ + VolumeStructure: &gadget.VolumeStructure{ + Size: 2048, + Filesystem: "ext4", + Content: []gadget.VolumeContent{ + { + UnresolvedSource: "bar", + Target: "/foo", + }, { + UnresolvedSource: "bar", + Target: "/some-dir/foo", + }, { + UnresolvedSource: "bar", + Target: "/some-dir/", + }, + }, + Update: gadget.VolumeUpdate{ + Edition: 2, + Preserve: []string{ + "/some-dir/preserved", + }, + }, + }, + } + s.mustResolveVolumeContent(c, psAfter) + + obs := &mockContentUpdateObserver{ + c: c, + expectedRole: psAfter.Role(), + } + + mountLookup := func(to *gadget.LaidOutStructure) (string, error) { + c.Check(to, DeepEquals, psAfter) + return outDir, nil + } + + rw, err := gadget.NewMountedFilesystemUpdater(psBefore, psAfter, s.backup, mountLookup, obs) + c.Assert(err, IsNil) + + err = rw.Backup() + c.Assert(err, IsNil) + + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/foo")), Equals, true) + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/bar")), Equals, true) + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/remove-me")), Equals, true) + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/to-be-removed")), Equals, true) + + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-other-dir/foo")), Equals, true) + + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/preserved")), Equals, true) + + err = rw.Update() + c.Assert(err, IsNil) + + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/foo")), Equals, true) + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/bar")), Equals, true) + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/remove-me")), Equals, false) + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/to-be-removed")), Equals, false) + + // We do not support removal of directories + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-other-dir/foo")), Equals, true) + + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/preserved")), Equals, true) + + err = rw.Rollback() + c.Assert(err, IsNil) + + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/foo")), Equals, true) + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/bar")), Equals, true) + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/remove-me")), Equals, true) + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/to-be-removed")), Equals, true) + + // It has always been there, but we can still check + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-other-dir/foo")), Equals, true) + + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/preserved")), Equals, true) +} + +func (s 
*mountedfilesystemTestSuite) TestMountedUpdaterRemoveFilesFiltered(c *C) { + data := []gadgetData{ + {name: "bar", target: "foo", content: "data"}, + {name: "bar", target: "some-dir/foo", content: "data"}, + {name: "bar", target: "some-dir/bar", content: "data"}, + } + + makeGadgetData(c, s.dir, data) + outDir := filepath.Join(c.MkDir(), "out-dir") + makeExistingData(c, outDir, data) + + psBefore := &gadget.LaidOutStructure{ + VolumeStructure: &gadget.VolumeStructure{ + Size: 2048, + Filesystem: "ext4", + Content: []gadget.VolumeContent{ + { + UnresolvedSource: "bar", + Target: "/foo", + }, { + UnresolvedSource: "bar", + Target: "/some-dir/foo", + }, { + UnresolvedSource: "bar", + Target: "/some-dir/", + }, + }, + Update: gadget.VolumeUpdate{ + Edition: 1, + }, + }, + } + psAfter := &gadget.LaidOutStructure{ + VolumeStructure: &gadget.VolumeStructure{ + Size: 2048, + Filesystem: "ext4", + Content: []gadget.VolumeContent{ + { + UnresolvedSource: "bar", + Target: "/foo", + }, { + UnresolvedSource: "bar", + Target: "/some-dir/foo", + }, { + UnresolvedSource: "bar", + Target: "/some-dir/", + }, + }, + Update: gadget.VolumeUpdate{ + Edition: 1, + }, + }, + } + + filterEverything := func(*gadget.ResolvedContent) bool { + return false + } + resolved, err := gadget.ResolveVolumeContent(s.dir, "", nil, psAfter.VolumeStructure, filterEverything) + c.Assert(err, IsNil) + // Make sure we filtered everything + c.Assert(resolved, HasLen, 0) + psAfter.ResolvedContent = resolved + + s.mustResolveVolumeContent(c, psAfter) + + obs := &mockContentUpdateObserver{ + c: c, + expectedRole: psAfter.Role(), + } + + mountLookup := func(to *gadget.LaidOutStructure) (string, error) { + c.Check(to, DeepEquals, psAfter) + return outDir, nil + } + + rw, err := gadget.NewMountedFilesystemUpdater(psBefore, psAfter, s.backup, mountLookup, obs) + c.Assert(err, IsNil) + + err = rw.Backup() + c.Assert(err, IsNil) + + c.Assert(osutil.FileExists(filepath.Join(outDir, "foo")), Equals, true) + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/foo")), Equals, true) + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/bar")), Equals, true) + + err = rw.Update() + // No file should have been removed + c.Assert(err, Equals, gadget.ErrNoUpdate) + c.Assert(osutil.FileExists(filepath.Join(outDir, "foo")), Equals, true) + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/foo")), Equals, true) + c.Assert(osutil.FileExists(filepath.Join(outDir, "some-dir/bar")), Equals, true) +} diff -Nru snapd-2.62+23.10/gadget/update.go snapd-2.63+23.10/gadget/update.go --- snapd-2.62+23.10/gadget/update.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/gadget/update.go 2024-04-24 00:00:39.000000000 +0000 @@ -1699,7 +1699,7 @@ if err != nil { return fmt.Errorf("cannot prepare update for volume structure %v on volume %s: %v", one.to, one.volume.Name, err) } - up, err := updaterForStructure(loc, one.to, new.RootDir, rollbackDir, observer) + up, err := updaterForStructure(loc, one.from, one.to, new.RootDir, rollbackDir, observer) if err != nil { return fmt.Errorf("cannot prepare update for volume structure %v on volume %s: %v", one.to, one.volume.Name, err) } @@ -1772,7 +1772,7 @@ var updaterForStructure = updaterForStructureImpl -func updaterForStructureImpl(loc StructureLocation, ps *LaidOutStructure, newRootDir, rollbackDir string, observer ContentUpdateObserver) (Updater, error) { +func updaterForStructureImpl(loc StructureLocation, fromPs *LaidOutStructure, ps *LaidOutStructure, newRootDir, rollbackDir string, 
observer ContentUpdateObserver) (Updater, error) { // TODO: this is sort of clunky, we already did the lookup, but doing the // lookup out of band from this function makes for easier mocking if !ps.HasFilesystem() { @@ -1784,12 +1784,12 @@ lookup := func(ps *LaidOutStructure) (string, error) { return loc.RootMountPoint, nil } - return newMountedFilesystemUpdater(ps, rollbackDir, lookup, observer) + return newMountedFilesystemUpdater(fromPs, ps, rollbackDir, lookup, observer) } } // MockUpdaterForStructure replace internal call with a mocked one, for use in tests only -func MockUpdaterForStructure(mock func(loc StructureLocation, ps *LaidOutStructure, rootDir, rollbackDir string, observer ContentUpdateObserver) (Updater, error)) (restore func()) { +func MockUpdaterForStructure(mock func(loc StructureLocation, fromPs, ps *LaidOutStructure, rootDir, rollbackDir string, observer ContentUpdateObserver) (Updater, error)) (restore func()) { old := updaterForStructure updaterForStructure = mock return func() { diff -Nru snapd-2.62+23.10/gadget/update_test.go snapd-2.63+23.10/gadget/update_test.go --- snapd-2.62+23.10/gadget/update_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/gadget/update_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -723,7 +723,7 @@ updaterForStructureCalls := 0 updateCalls := make(map[string]bool) backupCalls := make(map[string]bool) - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { c.Assert(psRootDir, Equals, newData.RootDir) c.Assert(psRollbackDir, Equals, rollbackDir) c.Assert(observer, Equals, muo) @@ -884,7 +884,7 @@ updaterForStructureCalls := 0 updateCalls := make(map[string]bool) backupCalls := make(map[string]bool) - restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { c.Assert(psRootDir, Equals, newData.RootDir) c.Assert(psRollbackDir, Equals, rollbackDir) c.Assert(observer, Equals, muo) @@ -1113,7 +1113,7 @@ updaterForStructureCalls := 0 updateCalls := make(map[string]bool) backupCalls := make(map[string]bool) - restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { c.Assert(psRootDir, Equals, newData.RootDir) c.Assert(psRollbackDir, Equals, rollbackDir) c.Assert(observer, Equals, muo) @@ -1394,7 +1394,7 @@ updaterForStructureCalls := 0 updateCalls := make(map[string]bool) backupCalls := make(map[string]bool) - restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer 
gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { c.Assert(psRootDir, Equals, newData.RootDir) c.Assert(psRollbackDir, Equals, rollbackDir) c.Assert(observer, Equals, muo) @@ -1668,7 +1668,7 @@ updaterForStructureCalls := 0 updateCalls := make(map[string]bool) backupCalls := make(map[string]bool) - restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { c.Assert(psRootDir, Equals, newData.RootDir) c.Assert(psRollbackDir, Equals, rollbackDir) c.Assert(observer, Equals, muo) @@ -1946,7 +1946,7 @@ pcBackupCalls := make(map[string]bool) fooUpdateCalls := make(map[string]bool) fooBackupCalls := make(map[string]bool) - restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { c.Assert(psRootDir, Equals, newData.RootDir) c.Assert(psRollbackDir, Equals, rollbackDir) c.Assert(observer, Equals, muo) @@ -2266,7 +2266,7 @@ newData.Info.Volumes["foo"].Structure[1].Content = []gadget.VolumeContent{{Image: imgName}} muo := &mockUpdateProcessObserver{} - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { c.Fatalf("unexpected call") return nil, errors.New("not called") }) @@ -2510,7 +2510,7 @@ pcBackupCalls := make(map[string]bool) fooUpdateCalls := make(map[string]bool) fooBackupCalls := make(map[string]bool) - restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { c.Assert(psRootDir, Equals, newData.RootDir) c.Assert(psRollbackDir, Equals, rollbackDir) c.Assert(observer, Equals, muo) @@ -2817,7 +2817,7 @@ pcBackupCalls := make(map[string]bool) fooUpdateCalls := make(map[string]bool) fooBackupCalls := make(map[string]bool) - restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, 
observer gadget.ContentUpdateObserver) (gadget.Updater, error) { c.Assert(psRootDir, Equals, newData.RootDir) c.Assert(psRollbackDir, Equals, rollbackDir) c.Assert(observer, Equals, muo) @@ -2960,7 +2960,7 @@ muo := &mockUpdateProcessObserver{} updaterForStructureCalls := 0 - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { c.Assert(psRootDir, Equals, newData.RootDir) c.Assert(psRollbackDir, Equals, rollbackDir) @@ -3206,7 +3206,7 @@ newData := gadget.GadgetData{Info: newInfo, RootDir: c.MkDir()} rollbackDir := c.MkDir() - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { c.Fatalf("unexpected call") return &mockUpdater{}, nil }) @@ -3255,7 +3255,7 @@ muo := &mockUpdateProcessObserver{} - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { c.Fatalf("unexpected call") return &mockUpdater{}, nil }) @@ -3394,7 +3394,7 @@ defer r() toUpdate := map[string]int{} - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { toUpdate[ps.Name()]++ return &mockUpdater{}, nil }) @@ -3480,7 +3480,7 @@ defer r() toUpdate := map[string]int{} - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { toUpdate[ps.Name()]++ return &mockUpdater{}, nil }) @@ -3538,7 +3538,7 @@ defer r() toUpdate := map[string]int{} - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { toUpdate[ps.Name()] = toUpdate[ps.Name()] + 1 return &mockUpdater{}, nil }) @@ -3589,7 +3589,7 @@ 
muo := &mockUpdateProcessObserver{} updaterForStructureCalls := 0 - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { updater := &mockUpdater{ updateCb: func() error { c.Fatalf("unexpected update call") @@ -3657,7 +3657,7 @@ backupCalls := make(map[string]bool) rollbackCalls := make(map[string]bool) updaterForStructureCalls := 0 - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { updater := &mockUpdater{ backupCb: func() error { backupCalls[ps.Name()] = true @@ -3745,7 +3745,7 @@ backupCalls := make(map[string]bool) rollbackCalls := make(map[string]bool) updaterForStructureCalls := 0 - restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { updater := &mockUpdater{ backupCb: func() error { backupCalls[ps.Name()] = true @@ -3838,7 +3838,7 @@ }) defer r() - r = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + r = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { return nil, errors.New("bad updater for structure") }) defer r() @@ -3879,7 +3879,7 @@ EnclosingVolume: &gadget.Volume{}, }, } - updater, err := gadget.UpdaterForStructure(gadget.StructureLocation{}, psBare, gadgetRootDir, rollbackDir, nil) + updater, err := gadget.UpdaterForStructure(gadget.StructureLocation{}, psBare, psBare, gadgetRootDir, rollbackDir, nil) c.Assert(err, IsNil) c.Assert(updater, FitsTypeOf, &gadget.RawStructureUpdater{}) @@ -3892,12 +3892,12 @@ EnclosingVolume: &gadget.Volume{}, }, } - updater, err = gadget.UpdaterForStructure(gadget.StructureLocation{RootMountPoint: "/"}, psFs, gadgetRootDir, rollbackDir, nil) + updater, err = gadget.UpdaterForStructure(gadget.StructureLocation{RootMountPoint: "/"}, psFs, psFs, gadgetRootDir, rollbackDir, nil) c.Assert(err, IsNil) c.Assert(updater, FitsTypeOf, &gadget.MountedFilesystemUpdater{}) // trigger errors - updater, err = gadget.UpdaterForStructure(gadget.StructureLocation{Device: "/dev/vda"}, psBare, gadgetRootDir, "", nil) + updater, err = gadget.UpdaterForStructure(gadget.StructureLocation{Device: "/dev/vda"}, psBare, psBare, gadgetRootDir, "", nil) c.Assert(err, ErrorMatches, "internal error: backup directory cannot be unset") c.Assert(updater, IsNil) } @@ -3962,7 +3962,7 @@ muo := 
&mockUpdateProcessObserver{} expectedStructs := []string{"first", "second"} updateCalls := 0 - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { mu := &mockUpdater{ updateCb: func() error { c.Assert(expectedStructs, testutil.Contains, ps.Name()) @@ -4021,7 +4021,7 @@ muo := &mockUpdateProcessObserver{} expectedStructs := []string{"first", "second"} updateCalls := 0 - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { mu := &mockUpdater{ updateCb: func() error { c.Assert(expectedStructs, testutil.Contains, ps.Name()) @@ -4075,7 +4075,7 @@ }) defer r() - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { updater := &mockUpdater{ updateCb: func() error { c.Fatalf("unexpected call") @@ -4127,7 +4127,7 @@ backupErr := errors.New("backup fails") updateErr := errors.New("update fails") - restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { updater := &mockUpdater{ backupCb: func() error { return backupErr }, updateCb: func() error { return updateErr }, @@ -4333,7 +4333,7 @@ // updater is only called with the kernel content, not with the // gadget content. 
mockUpdaterCalls := 0 - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { mockUpdaterCalls++ c.Check(ps.ResolvedContent, DeepEquals, []gadget.ResolvedContent{ { @@ -4403,7 +4403,7 @@ rollbackDir := c.MkDir() muo := &mockUpdateProcessObserver{} - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, psRootDir, psRollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { panic("should not get called") }) defer restore() diff -Nru snapd-2.62+23.10/go.mod snapd-2.63+23.10/go.mod --- snapd-2.62+23.10/go.mod 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/go.mod 2024-04-24 00:00:39.000000000 +0000 @@ -19,9 +19,8 @@ // if below two libseccomp-golang lines are updated, one must also update packaging/ubuntu-14.04/rules github.com/mvo5/libseccomp-golang v0.9.1-0.20180308152521-f4de83b52afb // old trusty builds only github.com/seccomp/libseccomp-golang v0.9.2-0.20220502024300-f57e1d55ea18 - github.com/snapcore/bolt v1.3.2-0.20210908134111-63c8bfcf7af8 github.com/snapcore/go-gettext v0.0.0-20191107141714-82bbea49e785 - github.com/snapcore/secboot v0.0.0-20230623151406-4d331d24f830 + github.com/snapcore/secboot v0.0.0-20240411101434-f3ad7c92552a golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 golang.org/x/net v0.9.0 // indirect golang.org/x/sys v0.7.0 @@ -35,6 +34,8 @@ gopkg.in/yaml.v3 v3.0.1 ) +require go.etcd.io/bbolt v1.3.9 + require ( github.com/canonical/go-sp800.108-kdf v0.0.0-20210314145419-a3359f2d21b9 // indirect github.com/canonical/tcglog-parser v0.0.0-20210824131805-69fa1e9f0ad2 // indirect diff -Nru snapd-2.62+23.10/go.sum snapd-2.63+23.10/go.sum --- snapd-2.62+23.10/go.sum 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/go.sum 2024-04-24 00:00:39.000000000 +0000 @@ -10,6 +10,7 @@ github.com/canonical/tcglog-parser v0.0.0-20210824131805-69fa1e9f0ad2/go.mod h1:QoW2apR2tBl6T/4czdND/EHjL1Ia9cCmQnIj9Xe0Kt8= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 h1:u9SHYsPQNyt5tgDm3YN7+9dYrpK96E5wFilTFWIDZOM= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/frankban/quicktest v1.2.2 h1:xfmOhhoH5fGPgbEAlhLpJH9p0z/0Qizio9osmvn9IUY= github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= @@ -37,19 +38,21 @@ github.com/mvo5/libseccomp-golang v0.9.1-0.20180308152521-f4de83b52afb h1:+u5VeqU0Lm7ESN1mS0WONqKRScw7WpPYYtr3zmqEFQ0= github.com/mvo5/libseccomp-golang v0.9.1-0.20180308152521-f4de83b52afb/go.mod h1:RduRpSkQHOCvZTbGgT/NJUGjFBFkYlVedimxssQ64ag= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a h1:3QH7VyOaaiUHNrA9Se4YQIRkDTCw1EJls9xTUCaCeRM= github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a/go.mod h1:4r5QyqhjIWCcK8DO4KMclc5Iknq5qVBAlbYYzAbUScQ= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502024300-f57e1d55ea18 h1:A15Ffi2aT/BtygokOpAI0Diwrw8PTHuDwaAN5C48s74= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502024300-f57e1d55ea18/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/snapcore/bolt v1.3.2-0.20210908134111-63c8bfcf7af8 h1:WmyDfH38e3MaMWrMCO5YpW96BANq5Ti2iwbliM/xTW0= -github.com/snapcore/bolt v1.3.2-0.20210908134111-63c8bfcf7af8/go.mod h1:Z6z3sf12AMDjT/4tbT/PmzzdACAxkWGhkuKWiVpTWLM= github.com/snapcore/go-gettext v0.0.0-20191107141714-82bbea49e785 h1:PaunR+BhraKSLxt2awQ42zofkP+NKh/VjQ0PjIMk/y4= github.com/snapcore/go-gettext v0.0.0-20191107141714-82bbea49e785/go.mod h1:D3SsWAXK7wCCBZu+Vk5hc1EuKj/L3XN1puEMXTU4LrQ= github.com/snapcore/maze.io-x-crypto v0.0.0-20190131090603-9b94c9afe066 h1:InG0EmriMOiI4YgtQNOo+6fNxzLCYioo3Q3BCVLdMCE= github.com/snapcore/maze.io-x-crypto v0.0.0-20190131090603-9b94c9afe066/go.mod h1:VuAdaITF1MrGzxPU+8GxagM1HW2vg7QhEFEeGHbmEMU= -github.com/snapcore/secboot v0.0.0-20230623151406-4d331d24f830 h1:SCJ9Uiekv6uMqzMGP50Y0KBxkLP7IzPW35aI3Po6iyM= -github.com/snapcore/secboot v0.0.0-20230623151406-4d331d24f830/go.mod h1:72paVOkm4sJugXt+v9ItmnjXgO921D8xqsbH2OekouY= +github.com/snapcore/secboot v0.0.0-20240411101434-f3ad7c92552a h1:yzzVi0yUosDYkjSQqGZNVtaVi+6yNFLiF0erKHlBbdo= +github.com/snapcore/secboot v0.0.0-20240411101434-f3ad7c92552a/go.mod h1:72paVOkm4sJugXt+v9ItmnjXgO921D8xqsbH2OekouY= github.com/snapcore/snapd v0.0.0-20201005140838-501d14ac146e/go.mod h1:3xrn7QDDKymcE5VO2rgWEQ5ZAUGb9htfwlXnoel6Io8= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI= +go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -60,6 +63,7 @@ golang.org/x/net v0.0.0-20201002202402-0a1ea396d57c/go.mod h1:iQL9McJNjoIa5mjH6nYTCTZXUN6RP+XW3eib7Ya3XcI= golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff -Nru snapd-2.62+23.10/httputil/client.go snapd-2.63+23.10/httputil/client.go --- snapd-2.62+23.10/httputil/client.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/httputil/client.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,10 +23,10 @@ "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "net" "net/http" "net/url" + "os" "path/filepath" "time" @@ -62,7 +62,7 @@ } extraCerts := make([]*CertData, 0, len(extraCertFiles)) for _, p := range extraCertFiles { - cert, err := ioutil.ReadFile(p) + 
cert, err := os.ReadFile(p) if err != nil { return nil, fmt.Errorf("cannot read certificate: %v", err) } diff -Nru snapd-2.62+23.10/httputil/logger_test.go snapd-2.63+23.10/httputil/logger_test.go --- snapd-2.62+23.10/httputil/logger_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/httputil/logger_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "os" @@ -113,7 +113,7 @@ Header: http.Header{ "Content-Type": []string{"application/octet-stream"}, }, - Body: ioutil.NopCloser(strings.NewReader(needle)), + Body: io.NopCloser(strings.NewReader(needle)), } tr := &httputil.LoggedTransport{ Transport: &fakeTransport{ diff -Nru snapd-2.62+23.10/i18n/xgettext-go/main.go snapd-2.63+23.10/i18n/xgettext-go/main.go --- snapd-2.62+23.10/i18n/xgettext-go/main.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/i18n/xgettext-go/main.go 2024-04-24 00:00:39.000000000 +0000 @@ -7,7 +7,6 @@ "go/parser" "go/token" "io" - "io/ioutil" "log" "os" "path/filepath" @@ -174,13 +173,13 @@ // If no search directories have been specified or we have an // absolute path, just try to read the contents directly. if len(opts.Directories) == 0 || filepath.IsAbs(fname) { - return ioutil.ReadFile(fname) + return os.ReadFile(fname) } // Otherwise, search for the file in each of the configured // directories. for _, dir := range opts.Directories { - content, err = ioutil.ReadFile(filepath.Join(dir, fname)) + content, err = os.ReadFile(filepath.Join(dir, fname)) if !os.IsNotExist(err) { break } @@ -324,7 +323,7 @@ var files []string if opts.FilesFrom != "" { - content, err := ioutil.ReadFile(opts.FilesFrom) + content, err := os.ReadFile(opts.FilesFrom) if err != nil { log.Fatalf("cannot read file %v: %v", opts.FilesFrom, err) } diff -Nru snapd-2.62+23.10/image/helpers.go snapd-2.63+23.10/image/helpers.go --- snapd-2.62+23.10/image/helpers.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/image/helpers.go 2024-04-24 00:00:39.000000000 +0000 @@ -98,7 +98,7 @@ if !ps.HasFilesystem() { continue } - mw, err := gadget.NewMountedFilesystemWriter(&ps, nil) + mw, err := gadget.NewMountedFilesystemWriter(nil, &ps, nil) if err != nil { return err } diff -Nru snapd-2.62+23.10/image/image_linux.go snapd-2.63+23.10/image/image_linux.go --- snapd-2.62+23.10/image/image_linux.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/image/image_linux.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "sort" @@ -190,7 +189,7 @@ func decodeModelAssertion(opts *Options) (*asserts.Model, error) { fn := opts.ModelFile - rawAssert, err := ioutil.ReadFile(fn) + rawAssert, err := os.ReadFile(fn) if err != nil { return nil, fmt.Errorf("cannot read model assertion: %s", err) } diff -Nru snapd-2.62+23.10/image/image_test.go snapd-2.63+23.10/image/image_test.go --- snapd-2.62+23.10/image/image_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/image/image_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "bytes" "context" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -771,7 +770,7 @@ }) c.Check(runSnaps[0].Path, testutil.FilePresent) - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 4) @@ -1197,7 +1196,7 @@ }) c.Check(runSnaps[0].Path, testutil.FilePresent) - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 5) @@ 
-1520,7 +1519,7 @@ }) c.Check(runSnaps[1].Path, testutil.FilePresent) - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 6) @@ -1657,7 +1656,7 @@ }) c.Check(runSnaps[0].Path, testutil.FilePresent) - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 5) @@ -1800,7 +1799,7 @@ }) c.Check(runSnaps[0].Path, testutil.FilePresent) - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 4) @@ -2028,7 +2027,7 @@ }) c.Check(runSnaps[0].Path, testutil.FilePresent) - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 4) @@ -2237,7 +2236,7 @@ // check assertions seedassertsdir := filepath.Join(seeddir, "assertions") - l, err := ioutil.ReadDir(seedassertsdir) + l, err := os.ReadDir(seedassertsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 9) } @@ -2863,7 +2862,7 @@ }) c.Check(runSnaps[0].Path, testutil.FilePresent) - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 3) @@ -2973,7 +2972,7 @@ }) c.Check(runSnaps[0].Path, testutil.FilePresent) - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 5) @@ -3036,7 +3035,7 @@ }) c.Check(runSnaps[0].Path, testutil.FilePresent) - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 2) @@ -3105,7 +3104,7 @@ }) c.Check(runSnaps[0].Path, testutil.FilePresent) - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 4) @@ -3150,7 +3149,7 @@ c.Check(essSnaps, HasLen, 0) c.Check(runSnaps, HasLen, 0) - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 0) @@ -3340,7 +3339,7 @@ }) c.Check(runSnaps[0].Path, testutil.FilePresent) - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 5) @@ -3505,7 +3504,7 @@ essSnaps, runSnaps, _ := s.loadSeed(c, seeddir) c.Check(essSnaps, HasLen, 4) c.Check(runSnaps, HasLen, 0) - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 4) @@ -3894,7 +3893,7 @@ }) } - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 4) @@ -4041,7 +4040,7 @@ }) c.Check(runSnaps[0].Path, testutil.FilePresent) - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 5) @@ -4234,7 +4233,7 @@ }) c.Check(runSnaps[0].Path, testutil.FilePresent) - l, err := ioutil.ReadDir(seedsnapsdir) + l, err := os.ReadDir(seedsnapsdir) c.Assert(err, IsNil) c.Check(l, HasLen, 4) diff -Nru snapd-2.62+23.10/image/preseed/preseed_linux.go snapd-2.63+23.10/image/preseed/preseed_linux.go --- snapd-2.62+23.10/image/preseed/preseed_linux.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/image/preseed/preseed_linux.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "encoding/json" "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -376,11 +375,11 @@ } var makePreseedTempDir = func() (string, error) { - return ioutil.TempDir("", "preseed-") + return os.MkdirTemp("", "preseed-") } var makeWritableTempDir = func() (string, error) { - return 
ioutil.TempDir("", "writable-") + return os.MkdirTemp("", "writable-") } func prepareCore20Chroot(opts *CoreOptions) (popts *preseedCoreOptions, cleanup func(), err error) { diff -Nru snapd-2.62+23.10/image/preseed/reset.go snapd-2.63+23.10/image/preseed/reset.go --- snapd-2.62+23.10/image/preseed/reset.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/image/preseed/reset.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "fmt" - "io/ioutil" "os" "path/filepath" @@ -37,7 +36,7 @@ // lxd.lxc -> /snap/core/current/usr/lib/snapd/complete.sh // lxc -> lxd.lxc func resetCompletionSymlinks(completersPath string) error { - files, err := ioutil.ReadDir(completersPath) + files, err := os.ReadDir(completersPath) if err != nil && !os.IsNotExist(err) { return fmt.Errorf("error reading %s: %v", completersPath, err) } @@ -46,7 +45,7 @@ // pass 1: find all symlinks pointing at complete.sh for _, fileInfo := range files { - if fileInfo.Mode()&os.ModeSymlink == 0 { + if fileInfo.Type()&os.ModeSymlink == 0 { continue } fullPath := filepath.Join(completersPath, fileInfo.Name()) diff -Nru snapd-2.62+23.10/interfaces/apparmor/backend.go snapd-2.63+23.10/interfaces/apparmor/backend.go --- snapd-2.62+23.10/interfaces/apparmor/backend.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/apparmor/backend.go 2024-04-24 00:00:39.000000000 +0000 @@ -40,7 +40,6 @@ import ( "bytes" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -162,10 +161,10 @@ // We must test the ".real" suffix first, this is a workaround for // https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=858004 vanillaProfilePath := filepath.Join(info.MountDir(), "/etc/apparmor.d/usr.lib.snapd.snap-confine.real") - vanillaProfileText, err := ioutil.ReadFile(vanillaProfilePath) + vanillaProfileText, err := os.ReadFile(vanillaProfilePath) if os.IsNotExist(err) { vanillaProfilePath = filepath.Join(info.MountDir(), "/etc/apparmor.d/usr.lib.snapd.snap-confine") - vanillaProfileText, err = ioutil.ReadFile(vanillaProfilePath) + vanillaProfileText, err = os.ReadFile(vanillaProfilePath) } if err != nil { return "", "", nil, fmt.Errorf("cannot open apparmor profile for vanilla snap-confine: %s", err) @@ -264,10 +263,10 @@ changed, removed, errEnsure := osutil.EnsureDirState(dir, glob, content) if len(changed) == 0 { - // XXX: because NFS workaround is handled separately the same correct - // snap-confine profile may need to be re-loaded. This is because the - // profile contains include directives and those load a second file - // that has changed outside of the scope of EnsureDirState. + // XXX: because remote file system workaround is handled separately the + // same correct snap-confine profile may need to be re-loaded. This is + // because the profile contains include directives and those load a + // second file that has changed outside of the scope of EnsureDirState. // // To counter that, always reload the profile by pretending it had // changed. @@ -656,6 +655,16 @@ snippets += strings.Replace(apparmor_sandbox.OverlayRootSnippet, "###UPPERDIR###", overlayRoot, -1) } return snippets + case "###INCLUDE_SYSTEM_TUNABLES_HOME_D_WITH_VENDORED_APPARMOR###": + // XXX: refactor this so that we don't have to duplicate this part. + // TODO: rewrite this whole mess with go templates. + features, _ := parserFeatures() + if strutil.ListContains(features, "snapd-internal") { + return `#include if exists "/etc/apparmor.d/tunables/home.d"` + } + return "" + default: + // TODO: Warn that an invalid pattern is being used. 
} return "" }) @@ -907,12 +916,13 @@ // ignoring all apparmor snippets as they may conflict with the // super-broad template we are starting with. } else { - // Check if NFS is mounted at or under $HOME. Because NFS is not - // transparent to apparmor we must alter the profile to counter that and - // allow access to SNAP_USER_* files. + // Check if a remote file system is mounted at or under $HOME. + // Because some file systems, like NFS, are not transparent to + // apparmor we must alter the profile to counter that and allow + // access to SNAP_USER_* files. tagSnippets = snippetForTag - if nfs, _ := osutil.IsHomeUsingNFS(); nfs { - tagSnippets += apparmor_sandbox.NfsSnippet + if isRemote, _ := osutil.IsHomeUsingRemoteFS(); isRemote { + tagSnippets += apparmor_sandbox.RemoteFSSnippet } if overlayRoot, _ := isRootWritableOverlay(); overlayRoot != "" { diff -Nru snapd-2.62+23.10/interfaces/apparmor/backend_test.go snapd-2.63+23.10/interfaces/apparmor/backend_test.go --- snapd-2.62+23.10/interfaces/apparmor/backend_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/apparmor/backend_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -814,7 +813,7 @@ err := s.Backend.Setup(appSet, interfaces.ConfinementOptions{}, s.Repo, s.meas) c.Assert(err, IsNil) profile := filepath.Join(dirs.SnapAppArmorDir, "snap.samba.smbd") - data, err := ioutil.ReadFile(profile) + data, err := os.ReadFile(profile) c.Assert(err, IsNil) for _, line := range []string{ // preamble @@ -854,7 +853,7 @@ err := s.Backend.Setup(appSet, interfaces.ConfinementOptions{}, s.Repo, s.meas) c.Assert(err, IsNil) profile := filepath.Join(dirs.SnapAppArmorDir, "snap.samba.smbd") - data, err := ioutil.ReadFile(profile) + data, err := os.ReadFile(profile) c.Assert(err, IsNil) for _, line := range []string{ // preamble @@ -1052,7 +1051,7 @@ func (s *backendSuite) TestCombineSnippets(c *C) { restore := apparmor_sandbox.MockLevel(apparmor_sandbox.Full) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() restore = osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) defer restore() @@ -1092,7 +1091,7 @@ func (s *backendSuite) TestUnconfinedFlag(c *C) { restore := apparmor_sandbox.MockLevel(apparmor_sandbox.Full) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() restore = osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) defer restore() @@ -1150,7 +1149,7 @@ func (s *backendSuite) TestCombineSnippetsChangeProfile(c *C) { restore := apparmor_sandbox.MockLevel(apparmor_sandbox.Full) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() restore = osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) defer restore() @@ -1188,7 +1187,7 @@ func (s *backendSuite) TestCombineSnippetsIncludeIfExists(c *C) { restore := apparmor_sandbox.MockLevel(apparmor_sandbox.Full) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return 
false, nil }) defer restore() restore = osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) defer restore() @@ -1260,7 +1259,7 @@ func (s *backendSuite) TestParallelInstallCombineSnippets(c *C) { restore := apparmor_sandbox.MockLevel(apparmor_sandbox.Full) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() restore = osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) defer restore() @@ -1309,7 +1308,7 @@ func (s *backendSuite) TestTemplateVarsWithHook(c *C) { restore := apparmor_sandbox.MockLevel(apparmor_sandbox.Full) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() restore = osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) defer restore() @@ -1660,17 +1659,17 @@ } // Ensure that both names of the snap-confine apparmor profile are supported. -func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyWithNFS1(c *C) { - s.testSetupSnapConfineGeneratedPolicyWithNFS(c, "usr.lib.snapd.snap-confine") +func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyWithRemoteFS1(c *C) { + s.testSetupSnapConfineGeneratedPolicyWithRemoteFS(c, "usr.lib.snapd.snap-confine") } -func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyWithNFS2(c *C) { - s.testSetupSnapConfineGeneratedPolicyWithNFS(c, "usr.lib.snapd.snap-confine.real") +func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyWithRemoteFS2(c *C) { + s.testSetupSnapConfineGeneratedPolicyWithRemoteFS(c, "usr.lib.snapd.snap-confine.real") } -func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyWithNFSNoProfileFiles(c *C) { - // Make it appear as if NFS workaround was needed. - restore := osutil.MockIsHomeUsingNFS(func() (bool, error) { return true, nil }) +func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyWithRemoteFSNoProfileFiles(c *C) { + // Make it appear as if remote file system workaround was needed. + restore := osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return true, nil }) defer restore() // Make it appear as if overlay was not used. restore = osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) @@ -1690,10 +1689,10 @@ c.Assert(cmd.Calls(), HasLen, 0) } -// snap-confine policy when NFS is used and snapd has not re-executed. -func (s *backendSuite) testSetupSnapConfineGeneratedPolicyWithNFS(c *C, profileFname string) { - // Make it appear as if NFS workaround was needed. - restore := osutil.MockIsHomeUsingNFS(func() (bool, error) { return true, nil }) +// snap-confine policy when remote file system is used and snapd has not re-executed. +func (s *backendSuite) testSetupSnapConfineGeneratedPolicyWithRemoteFS(c *C, profileFname string) { + // Make it appear as if remote file system workaround was needed. + restore := osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return true, nil }) defer restore() // Make it appear as if overlay was not used. @@ -1721,12 +1720,14 @@ err = (&apparmor.Backend{}).Initialize(ifacetest.DefaultInitializeOpts) c.Assert(err, IsNil) - // Because NFS is being used, we have the extra policy file. - files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) + // Because remote file system is being used, we have the extra policy file. 
+ files, err := os.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 1) c.Assert(files[0].Name(), Equals, "nfs-support") - c.Assert(files[0].Mode(), Equals, os.FileMode(0644)) + fi, err := files[0].Info() + c.Assert(err, IsNil) + c.Assert(fi.Mode().Perm(), Equals, os.FileMode(0644)) c.Assert(files[0].IsDir(), Equals, false) // The policy allows network access. @@ -1742,10 +1743,10 @@ }}) } -// snap-confine policy when NFS is used and snapd has re-executed. -func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyWithNFSAndReExec(c *C) { - // Make it appear as if NFS workaround was needed. - restore := osutil.MockIsHomeUsingNFS(func() (bool, error) { return true, nil }) +// snap-confine policy when remote file system is used and snapd has re-executed. +func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyWithRemoteFSAndReExec(c *C) { + // Make it appear as if remote file system workaround was needed. + restore := osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return true, nil }) defer restore() // Make it appear as if overlay was not used. @@ -1769,12 +1770,14 @@ err = (&apparmor.Backend{}).Initialize(ifacetest.DefaultInitializeOpts) c.Assert(err, IsNil) - // Because NFS is being used, we have the extra policy file. - files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) + // Because remote file system is being used, we have the extra policy file. + files, err := os.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 1) c.Assert(files[0].Name(), Equals, "nfs-support") - c.Assert(files[0].Mode(), Equals, os.FileMode(0644)) + fi, err := files[0].Info() + c.Assert(err, IsNil) + c.Assert(fi.Mode().Perm(), Equals, os.FileMode(0644)) c.Assert(files[0].IsDir(), Equals, false) // The policy allows network access. @@ -1789,8 +1792,8 @@ // Test behavior when os.Readlink "/proc/self/exe" fails. func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyError1(c *C) { - // Make it appear as if NFS workaround was needed. - restore := osutil.MockIsHomeUsingNFS(func() (bool, error) { return true, nil }) + // Make it appear as if remote file system workaround was needed. + restore := osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return true, nil }) defer restore() // Make it appear as if overlay was not used. @@ -1812,7 +1815,7 @@ c.Assert(err, ErrorMatches, "cannot read .*corrupt-proc-self-exe: .*") // We didn't create the policy file. - files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) + files, err := os.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 0) @@ -1822,8 +1825,8 @@ // Test behavior when exec.Command "apparmor_parser" fails func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyError2(c *C) { - // Make it appear as if NFS workaround was needed. - restore := osutil.MockIsHomeUsingNFS(func() (bool, error) { return true, nil }) + // Make it appear as if remote file system workaround was needed. + restore := osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return true, nil }) defer restore() // Make it appear as if overlay was not used. @@ -1851,7 +1854,7 @@ // While created the policy file initially we also removed it so that // no side-effects remain. 
- files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) + files, err := os.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 0) @@ -1872,8 +1875,8 @@ // Make it appear as if overlay workaround was needed. restore := osutil.MockIsRootWritableOverlay(func() (string, error) { return "/upper", nil }) defer restore() - // No NFS workaround - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + // No remote file system workaround + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() // Intercept interaction with apparmor_parser @@ -1895,7 +1898,7 @@ // Make it appear as if overlay workaround was needed. restore := osutil.MockIsRootWritableOverlay(func() (string, error) { return "/upper", nil }) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() // Intercept the /proc/self/exe symlink and point it to the distribution @@ -1920,15 +1923,17 @@ c.Assert(err, IsNil) // Because overlay is being used, we have the extra policy file. - files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) + files, err := os.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 1) c.Assert(files[0].Name(), Equals, "overlay-root") - c.Assert(files[0].Mode(), Equals, os.FileMode(0644)) + fi, err := files[0].Info() + c.Assert(err, IsNil) + c.Assert(fi.Mode().Perm(), Equals, os.FileMode(0644)) c.Assert(files[0].IsDir(), Equals, false) // The policy allows upperdir access. - data, err := ioutil.ReadFile(filepath.Join(apparmor_sandbox.SnapConfineAppArmorDir, files[0].Name())) + data, err := os.ReadFile(filepath.Join(apparmor_sandbox.SnapConfineAppArmorDir, files[0].Name())) c.Assert(err, IsNil) c.Assert(string(data), testutil.Contains, "\"/upper/{,**/}\" r,") @@ -1946,7 +1951,7 @@ restore := osutil.MockIsRootWritableOverlay(func() (string, error) { return "/upper", nil }) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() // Intercept interaction with apparmor_parser @@ -1967,15 +1972,17 @@ c.Assert(err, IsNil) // Because overlay is being used, we have the extra policy file. 
- files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) + files, err := os.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 1) c.Assert(files[0].Name(), Equals, "overlay-root") - c.Assert(files[0].Mode(), Equals, os.FileMode(0644)) + fi, err := files[0].Info() + c.Assert(err, IsNil) + c.Assert(fi.Mode().Perm(), Equals, os.FileMode(0644)) c.Assert(files[0].IsDir(), Equals, false) // The policy allows upperdir access - data, err := ioutil.ReadFile(filepath.Join(apparmor_sandbox.SnapConfineAppArmorDir, files[0].Name())) + data, err := os.ReadFile(filepath.Join(apparmor_sandbox.SnapConfineAppArmorDir, files[0].Name())) c.Assert(err, IsNil) c.Assert(string(data), testutil.Contains, "\"/upper/{,**/}\" r,") @@ -1987,7 +1994,7 @@ func (s *backendSuite) testSetupSnapConfineGeneratedPolicyWithBPFCapability(c *C, reexec bool) { restore := osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() // Pretend apparmor_parser supports bpf capability apparmor_sandbox.MockFeatures(nil, nil, []string{"cap-bpf"}, nil) @@ -2021,11 +2028,13 @@ // Capability bpf is supported by the parser, so an extra policy file // for snap-confine is present - files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) + files, err := os.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 1) c.Assert(files[0].Name(), Equals, "cap-bpf") - c.Assert(files[0].Mode(), Equals, os.FileMode(0644)) + fi, err := files[0].Info() + c.Assert(err, IsNil) + c.Assert(fi.Mode().Perm(), Equals, os.FileMode(0644)) c.Assert(files[0].IsDir(), Equals, false) c.Assert(filepath.Join(apparmor_sandbox.SnapConfineAppArmorDir, files[0].Name()), @@ -2061,7 +2070,7 @@ defer restore() restore = osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() // Probing for apparmor_parser features failed apparmor_sandbox.MockFeatures(nil, nil, nil, fmt.Errorf("mock probe error")) @@ -2089,7 +2098,7 @@ // Probing apparmor_parser capabilities failed, so nothing gets written // to the snap-confine policy directory - files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) + files, err := os.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 0) @@ -2100,46 +2109,46 @@ c.Assert(log.String(), testutil.Contains, "cannot determine apparmor_parser features: mock probe error") } -type nfsAndOverlaySnippetsScenario struct { - opts interfaces.ConfinementOptions - overlaySnippet string - nfsSnippet string +type remoteFSAndOverlaySnippetsScenario struct { + opts interfaces.ConfinementOptions + overlaySnippet string + remoteFSSnippet string } -var nfsAndOverlaySnippetsScenarios = []nfsAndOverlaySnippetsScenario{{ +var remoteFSAndOverlaySnippetsScenarios = []remoteFSAndOverlaySnippetsScenario{{ // By default apparmor is enforcing mode. 
- opts: interfaces.ConfinementOptions{}, - overlaySnippet: `"/upper/{,**/}" r,`, - nfsSnippet: "network inet,\n network inet6,", + opts: interfaces.ConfinementOptions{}, + overlaySnippet: `"/upper/{,**/}" r,`, + remoteFSSnippet: "network inet,\n network inet6,", }, { // DevMode switches apparmor to non-enforcing (complain) mode. - opts: interfaces.ConfinementOptions{DevMode: true}, - overlaySnippet: `"/upper/{,**/}" r,`, - nfsSnippet: "network inet,\n network inet6,", + opts: interfaces.ConfinementOptions{DevMode: true}, + overlaySnippet: `"/upper/{,**/}" r,`, + remoteFSSnippet: "network inet,\n network inet6,", }, { // JailMode switches apparmor to enforcing mode even in the presence of DevMode. - opts: interfaces.ConfinementOptions{DevMode: true, JailMode: true}, - overlaySnippet: `"/upper/{,**/}" r,`, - nfsSnippet: "network inet,\n network inet6,", + opts: interfaces.ConfinementOptions{DevMode: true, JailMode: true}, + overlaySnippet: `"/upper/{,**/}" r,`, + remoteFSSnippet: "network inet,\n network inet6,", }, { // Classic confinement (without jailmode) uses apparmor in complain mode by default and ignores all snippets. - opts: interfaces.ConfinementOptions{Classic: true}, - overlaySnippet: "", - nfsSnippet: "", + opts: interfaces.ConfinementOptions{Classic: true}, + overlaySnippet: "", + remoteFSSnippet: "", }, { // Classic confinement in JailMode uses enforcing apparmor. opts: interfaces.ConfinementOptions{Classic: true, JailMode: true}, // FIXME: logic in backend.addContent is wrong for this case //overlaySnippet: `"/upper/{,**/}" r,`, - //nfsSnippet: "network inet,\n network inet6,", - overlaySnippet: "", - nfsSnippet: "", + //remoteFSSnippet: "network inet,\n network inet6,", + overlaySnippet: "", + remoteFSSnippet: "", }} -func (s *backendSuite) TestNFSAndOverlaySnippets(c *C) { +func (s *backendSuite) TestRemoteFSAndOverlaySnippets(c *C) { restore := apparmor_sandbox.MockLevel(apparmor_sandbox.Full) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return true, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return true, nil }) defer restore() restore = osutil.MockIsRootWritableOverlay(func() (string, error) { return "/upper", nil }) defer restore() @@ -2147,18 +2156,18 @@ return nil } - for _, scenario := range nfsAndOverlaySnippetsScenarios { + for _, scenario := range remoteFSAndOverlaySnippetsScenarios { snapInfo := s.InstallSnap(c, scenario.opts, "", ifacetest.SambaYamlV1, 1) profile := filepath.Join(dirs.SnapAppArmorDir, "snap.samba.smbd") c.Check(profile, testutil.FileContains, scenario.overlaySnippet) - c.Check(profile, testutil.FileContains, scenario.nfsSnippet) + c.Check(profile, testutil.FileContains, scenario.remoteFSSnippet) updateNSProfile := filepath.Join(dirs.SnapAppArmorDir, "snap-update-ns.samba") c.Check(updateNSProfile, testutil.FileContains, scenario.overlaySnippet) s.RemoveSnap(c, snapInfo) } } -var casperOverlaySnippetsScenarios = []nfsAndOverlaySnippetsScenario{{ +var casperOverlaySnippetsScenarios = []remoteFSAndOverlaySnippetsScenario{{ // By default apparmor is enforcing mode. 
opts: interfaces.ConfinementOptions{}, overlaySnippet: `"/upper/{,**/}" r,`, @@ -2185,7 +2194,7 @@ func (s *backendSuite) TestCasperOverlaySnippets(c *C) { restore := apparmor_sandbox.MockLevel(apparmor_sandbox.Full) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() restore = osutil.MockIsRootWritableOverlay(func() (string, error) { return "/upper", nil }) defer restore() @@ -2268,7 +2277,7 @@ defer restoreTemplate() restore := apparmor_sandbox.MockLevel(apparmor_sandbox.Full) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() needle := `deny ptrace (trace),` @@ -2396,7 +2405,7 @@ c.Assert(err, IsNil) profile := filepath.Join(dirs.SnapAppArmorDir, "snap.samba.smbd") - data, err := ioutil.ReadFile(profile) + data, err := os.ReadFile(profile) c.Assert(err, IsNil) if tc.expected { @@ -2413,7 +2422,7 @@ defer restoreTemplate() restore := apparmor_sandbox.MockLevel(apparmor_sandbox.Full) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() for _, tc := range []struct { @@ -2442,7 +2451,7 @@ snapInfo := s.InstallSnap(c, tc.opts, "", ifacetest.SambaYamlV1, 1) profile := filepath.Join(dirs.SnapAppArmorDir, "snap.samba.smbd") - data, err := ioutil.ReadFile(profile) + data, err := os.ReadFile(profile) c.Assert(err, IsNil) c.Assert(string(data), testutil.Contains, tc.expected) @@ -2455,7 +2464,7 @@ defer restoreTemplate() restore := apparmor_sandbox.MockLevel(apparmor_sandbox.Full) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() for _, tc := range []struct { @@ -2483,7 +2492,7 @@ snapInfo := s.InstallSnap(c, tc.opts, "", ifacetest.SambaYamlV1, 1) profile := filepath.Join(dirs.SnapAppArmorDir, "snap.samba.smbd") - data, err := ioutil.ReadFile(profile) + data, err := os.ReadFile(profile) c.Assert(err, IsNil) c.Assert(string(data), tc.expected, "deny /usr/lib/python3*/{,**/}__pycache__/ w,") @@ -2508,7 +2517,7 @@ snapInfo := s.InstallSnap(c, interfaces.ConfinementOptions{}, "", snapYaml, 1) profile := filepath.Join(dirs.SnapAppArmorDir, "snap.app.cmd") - data, err := ioutil.ReadFile(profile) + data, err := os.ReadFile(profile) c.Assert(err, IsNil) c.Assert(string(data), testutil.Contains, "capability setuid,") c.Assert(string(data), testutil.Contains, "capability setgid,") @@ -2531,7 +2540,7 @@ snapInfo := s.InstallSnap(c, interfaces.ConfinementOptions{}, "", snapYaml, 1) profile := filepath.Join(dirs.SnapAppArmorDir, "snap.app.cmd") - data, err := ioutil.ReadFile(profile) + data, err := os.ReadFile(profile) c.Assert(err, IsNil) c.Assert(string(data), Not(testutil.Contains), "capability setuid,") c.Assert(string(data), Not(testutil.Contains), "capability setgid,") diff -Nru snapd-2.62+23.10/interfaces/apparmor/template.go snapd-2.63+23.10/interfaces/apparmor/template.go --- snapd-2.62+23.10/interfaces/apparmor/template.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/apparmor/template.go 2024-04-24 00:00:39.000000000 +0000 @@ -1001,6 +1001,12 @@ /run/snapd/lock/###SNAP_INSTANCE_NAME###.lock 
rwk, /run/snapd/lock/.lock rwk, + # While the base abstraction has rules for encryptfs encrypted home and + # private directories, it is missing rules for directory read on the toplevel + # directory of the mount (LP: #1848919) + owner @{HOME}/.Private/ r, + owner @{HOMEDIRS}/.ecryptfs/*/.Private/ r, + # Allow reading stored mount namespaces, /run/snapd/ns/ r, /run/snapd/ns/###SNAP_INSTANCE_NAME###.mnt r, @@ -1059,6 +1065,8 @@ /tmp/ r, /usr/ r, /var/ r, + /var/lib/ r, + /var/lib/snapd/ r, /var/snap/ r, # Allow reading timezone data. diff -Nru snapd-2.62+23.10/interfaces/builtin/acrn_support_test.go snapd-2.63+23.10/interfaces/builtin/acrn_support_test.go --- snapd-2.62+23.10/interfaces/builtin/acrn_support_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/acrn_support_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -99,7 +99,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets()[0], Equals, `# acrn-support KERNEL=="acrn_hsm", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%s/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%s/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *acrnSupportInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/allegro_vcu_test.go snapd-2.63+23.10/interfaces/builtin/allegro_vcu_test.go --- snapd-2.62+23.10/interfaces/builtin/allegro_vcu_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/allegro_vcu_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -96,7 +96,7 @@ c.Assert(spec.Snippets(), testutil.Contains, `# allegro-vcu SUBSYSTEM=="char", KERNEL=="dmaproxy", TAG+="snap_consumer_app"`) c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf( - `TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + `TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *AllegroVcuInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/alsa_test.go snapd-2.63+23.10/interfaces/builtin/alsa_test.go --- snapd-2.62+23.10/interfaces/builtin/alsa_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/alsa_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -89,7 +89,7 @@ c.Assert(spec.Snippets(), HasLen, 8) c.Assert(spec.Snippets(), testutil.Contains, `# alsa KERNEL=="pcmC[0-9]*D[0-9]*[cp]", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *AlsaInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/audio_playback_test.go snapd-2.63+23.10/interfaces/builtin/audio_playback_test.go --- 
snapd-2.62+23.10/interfaces/builtin/audio_playback_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/audio_playback_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -215,7 +215,7 @@ KERNEL=="pcmC[0-9]*D[0-9]*[cp]", TAG+="snap_audio-playback_app1"`) c.Assert(spec.Snippets(), testutil.Contains, `# audio-playback KERNEL=="timer", TAG+="snap_audio-playback_app1"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_audio-playback_app1", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_audio-playback_app1"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_audio-playback_app1", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_audio-playback_app1 $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *AudioPlaybackInterfaceSuite) TestInterfaces(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/block_devices_test.go snapd-2.63+23.10/interfaces/builtin/block_devices_test.go --- snapd-2.62+23.10/interfaces/builtin/block_devices_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/block_devices_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -94,7 +94,7 @@ c.Assert(spec.Snippets(), HasLen, 5) c.Assert(spec.Snippets()[0], Equals, `# block-devices KERNEL=="megaraid_sas_ioctl_node", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *blockDevicesInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/bluetooth_control_test.go snapd-2.63+23.10/interfaces/builtin/bluetooth_control_test.go --- snapd-2.62+23.10/interfaces/builtin/bluetooth_control_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/bluetooth_control_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -107,7 +107,7 @@ SUBSYSTEM=="bluetooth", TAG+="snap_other_app2"`) c.Assert(spec.Snippets(), testutil.Contains, `# bluetooth-control SUBSYSTEM=="BT_chrdev", TAG+="snap_other_app2"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_other_app2", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_other_app2"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_other_app2", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_other_app2 $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *BluetoothControlInterfaceSuite) TestInterfaces(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/bluez_test.go snapd-2.63+23.10/interfaces/builtin/bluez_test.go --- snapd-2.62+23.10/interfaces/builtin/bluez_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/bluez_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -251,7 +251,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# bluez KERNEL=="rfkill", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", 
RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) // on a classic system with bluez slot coming from the core snap. restore = release.MockOnClassic(true) @@ -261,7 +261,7 @@ c.Assert(spec.AddConnectedPlug(s.iface, s.plug, s.coreSlot), IsNil) c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets()[0], testutil.Contains, `KERNEL=="rfkill", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } diff -Nru snapd-2.62+23.10/interfaces/builtin/broadcom_asic_control_test.go snapd-2.63+23.10/interfaces/builtin/broadcom_asic_control_test.go --- snapd-2.62+23.10/interfaces/builtin/broadcom_asic_control_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/broadcom_asic_control_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -94,7 +94,7 @@ c.Assert(spec.Snippets(), HasLen, 3) c.Assert(spec.Snippets(), testutil.Contains, `# broadcom-asic-control SUBSYSTEM=="net", KERNEL=="bcm[0-9]*", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *BroadcomAsicControlSuite) TestKModSpec(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/camera_test.go snapd-2.63+23.10/interfaces/builtin/camera_test.go --- snapd-2.62+23.10/interfaces/builtin/camera_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/camera_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -91,7 +91,7 @@ KERNEL=="video[0-9]*", TAG+="snap_consumer_app"`) c.Assert(spec.Snippets(), testutil.Contains, `# camera KERNEL=="vchiq", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *CameraInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/common.go snapd-2.63+23.10/interfaces/builtin/common.go --- snapd-2.62+23.10/interfaces/builtin/common.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/common.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,7 @@ package builtin import ( - "io/ioutil" + "os" "path/filepath" "github.com/snapcore/snapd/interfaces" @@ -37,9 +37,9 @@ // applicable for testing. 
var evalSymlinks = filepath.EvalSymlinks -// readDir is either ioutil.ReadDir or a mocked function applicable for +// readDir is either os.ReadDir or a mocked function applicable for // testing. -var readDir = ioutil.ReadDir +var readDir = os.ReadDir type commonInterface struct { name string diff -Nru snapd-2.62+23.10/interfaces/builtin/common_test.go snapd-2.63+23.10/interfaces/builtin/common_test.go --- snapd-2.62+23.10/interfaces/builtin/common_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/common_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "os" + "io/fs" . "gopkg.in/check.v1" @@ -64,11 +64,11 @@ c.Assert(spec.Snippets(), DeepEquals, []string{ `# common KERNEL=="foo", TAG+="snap_consumer_app-a"`, - fmt.Sprintf(`TAG=="snap_consumer_app-a", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app-a"`, dirs.DistroLibExecDir), + fmt.Sprintf(`TAG=="snap_consumer_app-a", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app-a $devpath $major:$minor"`, dirs.DistroLibExecDir), // NOTE: app-b is unaffected as it doesn't have a plug reference. `# common KERNEL=="foo", TAG+="snap_consumer_app-c"`, - fmt.Sprintf(`TAG=="snap_consumer_app-c", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app-c"`, dirs.DistroLibExecDir), + fmt.Sprintf(`TAG=="snap_consumer_app-c", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app-c $devpath $major:$minor"`, dirs.DistroLibExecDir), }) // connected plug udev rules are optional @@ -89,8 +89,8 @@ }) } -// MockReadDir replaces the io/ioutil.ReadDir function used inside the caps package. -func MockReadDir(test *testutil.BaseTest, fn func(string) ([]os.FileInfo, error)) { +// MockReadDir replaces the os.ReadDir function used inside the caps package. 
+func MockReadDir(test *testutil.BaseTest, fn func(string) ([]fs.DirEntry, error)) { orig := readDir readDir = fn test.AddCleanup(func() { diff -Nru snapd-2.62+23.10/interfaces/builtin/desktop.go snapd-2.63+23.10/interfaces/builtin/desktop.go --- snapd-2.62+23.10/interfaces/builtin/desktop.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/desktop.go 2024-04-24 00:00:39.000000000 +0000 @@ -181,14 +181,14 @@ path=/org/freedesktop/Notifications interface=org.freedesktop.Notifications member="{GetCapabilities,GetServerInformation,Notify,CloseNotification}" - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), dbus (receive) bus=session path=/org/freedesktop/Notifications interface=org.freedesktop.Notifications member={ActionInvoked,NotificationClosed,NotificationReplied} - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), # KDE Plasma's Inhibited property indicating "do not disturb" mode # https://invent.kde.org/plasma/plasma-workspace/-/blob/master/libnotificationmanager/dbus/org.freedesktop.Notifications.xml#L42 @@ -197,14 +197,14 @@ path=/org/freedesktop/Notifications interface=org.freedesktop.DBus.Properties member="Get{,All}" - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), dbus (receive) bus=session path=/org/freedesktop/Notifications interface=org.freedesktop.DBus.Properties member=PropertiesChanged - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), # DesktopAppInfo Launched dbus (send) diff -Nru snapd-2.62+23.10/interfaces/builtin/desktop_legacy.go snapd-2.63+23.10/interfaces/builtin/desktop_legacy.go --- snapd-2.62+23.10/interfaces/builtin/desktop_legacy.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/desktop_legacy.go 2024-04-24 00:00:39.000000000 +0000 @@ -302,35 +302,35 @@ path=/{StatusNotifierWatcher,org/ayatana/NotificationItem/*} interface=org.kde.StatusNotifierWatcher member=RegisterStatusNotifierItem - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), dbus (send) bus=session path=/{StatusNotifierItem,org/ayatana/NotificationItem/*} interface=org.kde.StatusNotifierItem member="New{AttentionIcon,Icon,IconThemePath,OverlayIcon,Status,Title,ToolTip}" - peer=(name=org.freedesktop.DBus, label=unconfined), + peer=(name=org.freedesktop.DBus, label="{plasmashell,unconfined}"), dbus (receive) bus=session path=/{StatusNotifierItem,org/ayatana/NotificationItem/*} interface=org.kde.StatusNotifierItem member={Activate,ContextMenu,Scroll,SecondaryActivate,ProvideXdgActivationToken,XAyatanaSecondaryActivate} - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), dbus (send) bus=session path=/{StatusNotifierItem/menu,org/ayatana/NotificationItem/*/Menu} interface=com.canonical.dbusmenu member="{LayoutUpdated,ItemsPropertiesUpdated}" - peer=(name=org.freedesktop.DBus, label=unconfined), + peer=(name=org.freedesktop.DBus, label="{plasmashell,unconfined}"), dbus (receive) bus=session path=/{StatusNotifierItem,StatusNotifierItem/menu,org/ayatana/NotificationItem/**} interface={org.freedesktop.DBus.Properties,com.canonical.dbusmenu} member={Get*,AboutTo*,Event*} - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), # notifications dbus (send) @@ -368,7 +368,7 @@ path=/org/ayatana/NotificationItem/* interface=org.kde.StatusNotifierItem member=XAyatanaNew* - peer=(name=org.freedesktop.DBus, label=unconfined), + peer=(name=org.freedesktop.DBus, label="{plasmashell,unconfined}"), ` const desktopLegacyConnectedPlugSecComp = 
` diff -Nru snapd-2.62+23.10/interfaces/builtin/device_buttons_test.go snapd-2.63+23.10/interfaces/builtin/device_buttons_test.go --- snapd-2.62+23.10/interfaces/builtin/device_buttons_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/device_buttons_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -92,7 +92,7 @@ KERNEL=="event[0-9]*", SUBSYSTEM=="input", ENV{ID_INPUT_KEY}=="1", ENV{ID_INPUT_KEYBOARD}!="1", TAG+="snap_consumer_app"`) c.Assert(spec.Snippets(), testutil.Contains, `# device-buttons KERNEL=="full", SUBSYSTEM=="mem", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) c.Assert(spec.TriggeredSubsystems(), DeepEquals, []string{"input/key"}) } diff -Nru snapd-2.62+23.10/interfaces/builtin/display_control_test.go snapd-2.63+23.10/interfaces/builtin/display_control_test.go --- snapd-2.62+23.10/interfaces/builtin/display_control_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/display_control_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,7 @@ package builtin_test import ( - "io/ioutil" + "io/fs" "os" "path/filepath" @@ -85,8 +85,8 @@ func (s *displayControlInterfaceSuite) TestAppArmorSpec(c *C) { c.Assert(os.MkdirAll(filepath.Join(s.tmpdir, "foo_backlight"), 0755), IsNil) c.Assert(os.MkdirAll(filepath.Join(s.tmpdir, "bar_backlight"), 0755), IsNil) - builtin.MockReadDir(&s.BaseTest, func(path string) ([]os.FileInfo, error) { - return ioutil.ReadDir(s.tmpdir) + builtin.MockReadDir(&s.BaseTest, func(path string) ([]fs.DirEntry, error) { + return os.ReadDir(s.tmpdir) }) builtin.MockEvalSymlinks(&s.BaseTest, func(path string) (string, error) { return "(dereferenced)" + path, nil diff -Nru snapd-2.62+23.10/interfaces/builtin/dm_crypt_test.go snapd-2.63+23.10/interfaces/builtin/dm_crypt_test.go --- snapd-2.62+23.10/interfaces/builtin/dm_crypt_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/dm_crypt_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -107,7 +107,7 @@ c.Assert(spec.Snippets(), testutil.Contains, `# dm-crypt SUBSYSTEM=="block", TAG+="snap_consumer_app"`) c.Assert(spec.Snippets(), testutil.Contains, - fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *DmCryptInterfaceSuite) TestSeccompSpec(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/dsp_test.go snapd-2.63+23.10/interfaces/builtin/dsp_test.go --- snapd-2.62+23.10/interfaces/builtin/dsp_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/dsp_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -107,7 +107,7 @@ c.Assert(spec.Snippets(), HasLen, 6) c.Assert(spec.Snippets(), testutil.Contains, `# dsp KERNEL=="iav", TAG+="snap_my-device_svc"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_my-device_svc", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", 
RUN+="%v/snap-device-helper snap_my-device_svc"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_my-device_svc", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_my-device_svc $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *dspSuite) TestUDevConnectedPlugNoFlavor(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/dvb_test.go snapd-2.63+23.10/interfaces/builtin/dvb_test.go --- snapd-2.62+23.10/interfaces/builtin/dvb_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/dvb_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -90,7 +90,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# dvb SUBSYSTEM=="dvb", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *DvbInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/fpga_test.go snapd-2.63+23.10/interfaces/builtin/fpga_test.go --- snapd-2.62+23.10/interfaces/builtin/fpga_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/fpga_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -92,7 +92,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# fpga SUBSYSTEM=="misc", KERNEL=="fpga[0-9]*", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *FpgaInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/framebuffer_test.go snapd-2.63+23.10/interfaces/builtin/framebuffer_test.go --- snapd-2.62+23.10/interfaces/builtin/framebuffer_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/framebuffer_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -91,7 +91,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets()[0], Equals, `# framebuffer KERNEL=="fb[0-9]*", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *FramebufferInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/fuse_support_test.go snapd-2.63+23.10/interfaces/builtin/fuse_support_test.go --- snapd-2.62+23.10/interfaces/builtin/fuse_support_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/fuse_support_test.go 2024-04-24 00:00:39.000000000 
+0000 @@ -98,7 +98,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# fuse-support KERNEL=="fuse", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *FuseSupportInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/fwupd_test.go snapd-2.63+23.10/interfaces/builtin/fwupd_test.go --- snapd-2.62+23.10/interfaces/builtin/fwupd_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/fwupd_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -279,7 +279,7 @@ c.Assert(snippets[11], Equals, `# fwupd SUBSYSTEM=="usb", ENV{DEVTYPE}=="usb_device", TAG+="snap_uefi-fw-tools_app2"`) - expected := fmt.Sprintf(`TAG=="snap_uefi-fw-tools_app2", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_uefi-fw-tools_app2"`, dirs.DistroLibExecDir) + expected := fmt.Sprintf(`TAG=="snap_uefi-fw-tools_app2", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_uefi-fw-tools_app2 $devpath $major:$minor"`, dirs.DistroLibExecDir) c.Assert(snippets[12], Equals, expected) // The implicit slot found on classic systems does not generate any rules diff -Nru snapd-2.62+23.10/interfaces/builtin/hardware_random_control_test.go snapd-2.63+23.10/interfaces/builtin/hardware_random_control_test.go --- snapd-2.62+23.10/interfaces/builtin/hardware_random_control_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/hardware_random_control_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -89,7 +89,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# hardware-random-control KERNEL=="hwrng", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *HardwareRandomControlInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/hardware_random_observe_test.go snapd-2.63+23.10/interfaces/builtin/hardware_random_observe_test.go --- snapd-2.62+23.10/interfaces/builtin/hardware_random_observe_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/hardware_random_observe_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -89,7 +89,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# hardware-random-observe KERNEL=="hwrng", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", 
RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *HardwareRandomObserveInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/hidraw_test.go snapd-2.63+23.10/interfaces/builtin/hidraw_test.go --- snapd-2.62+23.10/interfaces/builtin/hidraw_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/hidraw_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -269,7 +269,7 @@ SUBSYSTEM=="hidraw", KERNEL=="hidraw0", TAG+="snap_client-snap_app-accessing-2-devices"` c.Assert(snippet, Equals, expectedSnippet1) extraSnippet := spec.Snippets()[1] - expectedExtraSnippet1 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-devices", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-2-devices"`, dirs.DistroLibExecDir) + expectedExtraSnippet1 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-devices", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-2-devices $devpath $major:$minor"`, dirs.DistroLibExecDir) c.Assert(extraSnippet, Equals, expectedExtraSnippet1) // add the plug for the first slot with vendor and product ids @@ -282,7 +282,7 @@ SUBSYSTEM=="hidraw", SUBSYSTEMS=="usb", ATTRS{idVendor}=="0001", ATTRS{idProduct}=="0001", TAG+="snap_client-snap_app-accessing-2-devices"` c.Assert(snippet, Equals, expectedSnippet2) extraSnippet = spec.Snippets()[1] - expectedExtraSnippet2 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-devices", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-2-devices"`, dirs.DistroLibExecDir) + expectedExtraSnippet2 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-devices", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-2-devices $devpath $major:$minor"`, dirs.DistroLibExecDir) c.Assert(extraSnippet, Equals, expectedExtraSnippet2) // add the plug for the second slot with vendor and product ids @@ -295,7 +295,7 @@ SUBSYSTEM=="hidraw", SUBSYSTEMS=="usb", ATTRS{idVendor}=="ffff", ATTRS{idProduct}=="ffff", TAG+="snap_client-snap_app-accessing-2-devices"` c.Assert(snippet, Equals, expectedSnippet3) extraSnippet = spec.Snippets()[1] - expectedExtraSnippet3 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-devices", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-2-devices"`, dirs.DistroLibExecDir) + expectedExtraSnippet3 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-devices", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-2-devices $devpath $major:$minor"`, dirs.DistroLibExecDir) c.Assert(extraSnippet, Equals, expectedExtraSnippet3) } @@ -328,7 +328,7 @@ func (s *HidrawInterfaceSuite) TestConnectedPlugUDevSnippetsForPath(c *C) { expectedSnippet1 := `# hidraw SUBSYSTEM=="hidraw", KERNEL=="hidraw0", TAG+="snap_client-snap_app-accessing-2-devices"` - expectedExtraSnippet1 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-devices", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-2-devices"`, dirs.DistroLibExecDir) + expectedExtraSnippet1 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-devices", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} 
snap_client-snap_app-accessing-2-devices $devpath $major:$minor"`, dirs.DistroLibExecDir) udevSpec := udev.NewSpecification(interfaces.NewSnapAppSet(s.testPlugPort1.Snap())) err := udevSpec.AddConnectedPlug(s.iface, s.testPlugPort1, s.testSlot1) c.Assert(err, IsNil) @@ -341,7 +341,7 @@ expectedSnippet2 := `# hidraw IMPORT{builtin}="usb_id" SUBSYSTEM=="hidraw", SUBSYSTEMS=="usb", ATTRS{idVendor}=="0001", ATTRS{idProduct}=="0001", TAG+="snap_client-snap_app-accessing-2-devices"` - expectedExtraSnippet2 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-devices", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-2-devices"`, dirs.DistroLibExecDir) + expectedExtraSnippet2 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-devices", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-2-devices $devpath $major:$minor"`, dirs.DistroLibExecDir) udevSpec = udev.NewSpecification(interfaces.NewSnapAppSet(s.testPlugPort1.Snap())) err = udevSpec.AddConnectedPlug(s.iface, s.testPlugPort1, s.testUDev1) c.Assert(err, IsNil) @@ -354,7 +354,7 @@ expectedSnippet3 := `# hidraw IMPORT{builtin}="usb_id" SUBSYSTEM=="hidraw", SUBSYSTEMS=="usb", ATTRS{idVendor}=="ffff", ATTRS{idProduct}=="ffff", TAG+="snap_client-snap_app-accessing-2-devices"` - expectedExtraSnippet3 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-devices", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-2-devices"`, dirs.DistroLibExecDir) + expectedExtraSnippet3 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-devices", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-2-devices $devpath $major:$minor"`, dirs.DistroLibExecDir) udevSpec = udev.NewSpecification(interfaces.NewSnapAppSet(s.testPlugPort2.Snap())) err = udevSpec.AddConnectedPlug(s.iface, s.testPlugPort2, s.testUDev2) c.Assert(err, IsNil) diff -Nru snapd-2.62+23.10/interfaces/builtin/i2c_test.go snapd-2.63+23.10/interfaces/builtin/i2c_test.go --- snapd-2.62+23.10/interfaces/builtin/i2c_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/i2c_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -232,7 +232,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# i2c KERNEL=="i2c-1", TAG+="snap_client-snap_app-accessing-1-port"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-1-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-1-port"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-1-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-1-port $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *I2cInterfaceSuite) TestUDevSpecSysfsName(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/iio_test.go snapd-2.63+23.10/interfaces/builtin/iio_test.go --- snapd-2.62+23.10/interfaces/builtin/iio_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/iio_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -203,7 +203,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# iio KERNEL=="iio:device1", TAG+="snap_client-snap_app-accessing-1-port"`) - 
c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-1-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-1-port"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-1-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-1-port $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *IioInterfaceSuite) TestConnectedPlugAppArmorSingleSnippet(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/intel_mei_test.go snapd-2.63+23.10/interfaces/builtin/intel_mei_test.go --- snapd-2.62+23.10/interfaces/builtin/intel_mei_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/intel_mei_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -89,7 +89,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# intel-mei SUBSYSTEM=="mei", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *IntelMEISuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/io_ports_control_test.go snapd-2.63+23.10/interfaces/builtin/io_ports_control_test.go --- snapd-2.62+23.10/interfaces/builtin/io_ports_control_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/io_ports_control_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -97,7 +97,7 @@ c.Assert(udevSpec.Snippets(), HasLen, 2) c.Assert(udevSpec.Snippets(), testutil.Contains, `# io-ports-control KERNEL=="port", TAG+="snap_consumer_app"`) - c.Assert(udevSpec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(udevSpec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *ioPortsControlInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/ion_memory_control_test.go snapd-2.63+23.10/interfaces/builtin/ion_memory_control_test.go --- snapd-2.62+23.10/interfaces/builtin/ion_memory_control_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/ion_memory_control_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -90,7 +90,7 @@ c.Assert(spec.Snippets(), testutil.Contains, `# ion-memory-control KERNEL=="ion", TAG+="snap_consumer_app"`) c.Assert(spec.Snippets(), testutil.Contains, - fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *IonMemoryControlInterfaceSuite) TestStaticInfo(c *C) { diff -Nru 
snapd-2.62+23.10/interfaces/builtin/joystick_test.go snapd-2.63+23.10/interfaces/builtin/joystick_test.go --- snapd-2.62+23.10/interfaces/builtin/joystick_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/joystick_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -94,7 +94,7 @@ KERNEL=="event[0-9]*", SUBSYSTEM=="input", ENV{ID_INPUT_JOYSTICK}=="1", TAG+="snap_consumer_app"`) c.Assert(spec.Snippets(), testutil.Contains, `# joystick KERNEL=="full", SUBSYSTEM=="mem", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) c.Assert(spec.TriggeredSubsystems(), DeepEquals, []string{"input/joystick"}) } diff -Nru snapd-2.62+23.10/interfaces/builtin/kernel_module_control_test.go snapd-2.63+23.10/interfaces/builtin/kernel_module_control_test.go --- snapd-2.62+23.10/interfaces/builtin/kernel_module_control_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/kernel_module_control_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -99,7 +99,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# kernel-module-control KERNEL=="mem", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *KernelModuleControlInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/kubernetes_support.go snapd-2.63+23.10/interfaces/builtin/kubernetes_support.go --- snapd-2.62+23.10/interfaces/builtin/kubernetes_support.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/kubernetes_support.go 2024-04-24 00:00:39.000000000 +0000 @@ -73,6 +73,11 @@ /etc/ssl/certs/java/{,*} r, #include + +# some workloads like cilium may attempt to use tc to set up complex +# network traffic control, which in turn uses seqpacket +network alg seqpacket, + /{,usr/}bin/systemd-run Cxr -> systemd_run, /run/systemd/private r, profile systemd_run (attach_disconnected,mediate_deleted) { diff -Nru snapd-2.62+23.10/interfaces/builtin/kubernetes_support_test.go snapd-2.63+23.10/interfaces/builtin/kubernetes_support_test.go --- snapd-2.62+23.10/interfaces/builtin/kubernetes_support_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/kubernetes_support_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -254,7 +254,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# kubernetes-support KERNEL=="kmsg", TAG+="snap_kubernetes-support_default"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_kubernetes-support_default", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_kubernetes-support_default"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, 
fmt.Sprintf(`TAG=="snap_kubernetes-support_default", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_kubernetes-support_default $devpath $major:$minor"`, dirs.DistroLibExecDir)) // kubeproxy should not have any rules spec = udev.NewSpecification(interfaces.NewSnapAppSet(s.plugKubeproxy.Snap())) @@ -269,7 +269,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# kubernetes-support KERNEL=="kmsg", TAG+="snap_kubernetes-support_kubelet"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_kubernetes-support_kubelet", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_kubernetes-support_kubelet"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_kubernetes-support_kubelet", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_kubernetes-support_kubelet $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *KubernetesSupportInterfaceSuite) TestInterfaces(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/kvm.go snapd-2.63+23.10/interfaces/builtin/kvm.go --- snapd-2.62+23.10/interfaces/builtin/kvm.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/kvm.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "os" "regexp" "strings" @@ -67,7 +67,7 @@ var flagsMatcher = regexp.MustCompile(`(?m)^flags\s+:\s+(.*)$`).FindSubmatch func getCpuFlags() (flags []string, err error) { - buf, err := ioutil.ReadFile(procCpuinfo) + buf, err := os.ReadFile(procCpuinfo) if err != nil { // if we can't read cpuinfo, we want to know _why_ return nil, fmt.Errorf("unable to read %v: %v", procCpuinfo, err) diff -Nru snapd-2.62+23.10/interfaces/builtin/kvm_test.go snapd-2.63+23.10/interfaces/builtin/kvm_test.go --- snapd-2.62+23.10/interfaces/builtin/kvm_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/kvm_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -127,7 +127,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets()[0], Equals, `# kvm KERNEL=="kvm", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%s/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%s/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *kvmInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/media_control_test.go snapd-2.63+23.10/interfaces/builtin/media_control_test.go --- snapd-2.62+23.10/interfaces/builtin/media_control_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/media_control_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -93,7 +93,7 @@ c.Assert(spec.Snippets(), testutil.Contains, `# media-control SUBSYSTEM=="video4linux", KERNEL=="v4l-subdev[0-9]*", TAG+="snap_consumer_app"`) c.Assert(spec.Snippets(), testutil.Contains, - fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath 
$major:$minor"`, dirs.DistroLibExecDir)) } func (s *MediacontrolInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/mir_test.go snapd-2.63+23.10/interfaces/builtin/mir_test.go --- snapd-2.62+23.10/interfaces/builtin/mir_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/mir_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -149,7 +149,7 @@ KERNEL=="event[0-9]*", TAG+="snap_mir-server_mir"`) c.Assert(udevSpec.Snippets(), testutil.Contains, `# mir KERNEL=="ts[0-9]*", TAG+="snap_mir-server_mir"`) - c.Assert(udevSpec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_mir-server_mir", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_mir-server_mir"`, dirs.DistroLibExecDir)) + c.Assert(udevSpec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_mir-server_mir", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_mir-server_mir $devpath $major:$minor"`, dirs.DistroLibExecDir)) c.Assert(udevSpec.TriggeredSubsystems(), DeepEquals, []string{"input"}) } diff -Nru snapd-2.62+23.10/interfaces/builtin/modem_manager_test.go snapd-2.63+23.10/interfaces/builtin/modem_manager_test.go --- snapd-2.62+23.10/interfaces/builtin/modem_manager_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/modem_manager_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -213,7 +213,7 @@ c.Assert(udevSpec.Snippets()[0], testutil.Contains, `SUBSYSTEMS=="usb"`) c.Assert(udevSpec.Snippets(), testutil.Contains, `# modem-manager KERNEL=="rfcomm*|tty[a-zA-Z]*[0-9]*|cdc-wdm[0-9]*|*MBIM|*QMI|*AT|*QCDM", TAG+="snap_modem-manager_mm"`) - c.Assert(udevSpec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_modem-manager_mm", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_modem-manager_mm"`, dirs.DistroLibExecDir)) + c.Assert(udevSpec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_modem-manager_mm", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_modem-manager_mm $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *ModemManagerInterfaceSuite) TestPermanentSlotDBus(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/network_control_test.go snapd-2.63+23.10/interfaces/builtin/network_control_test.go --- snapd-2.62+23.10/interfaces/builtin/network_control_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/network_control_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -145,7 +145,7 @@ c.Assert(spec.Snippets(), HasLen, 3) c.Assert(spec.Snippets(), testutil.Contains, `# network-control KERNEL=="tun", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *NetworkControlInterfaceSuite) TestMountSpec(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/network_manager_test.go snapd-2.63+23.10/interfaces/builtin/network_manager_test.go --- snapd-2.62+23.10/interfaces/builtin/network_manager_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/network_manager_test.go 2024-04-24 00:00:39.000000000 +0000 @@ 
-241,7 +241,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# network-manager KERNEL=="rfkill", TAG+="snap_network-manager_nm"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_network-manager_nm", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_network-manager_nm"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_network-manager_nm", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_network-manager_nm $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *NetworkManagerInterfaceSuite) TestInterfaces(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/ofono_test.go snapd-2.63+23.10/interfaces/builtin/ofono_test.go --- snapd-2.62+23.10/interfaces/builtin/ofono_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/ofono_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -212,7 +212,7 @@ KERNEL=="tun", TAG+="snap_ofono_app"`) c.Assert(spec.Snippets(), testutil.Contains, `# ofono KERNEL=="dsp", TAG+="snap_ofono_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_ofono_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_ofono_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_ofono_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_ofono_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *OfonoInterfaceSuite) TestInterfaces(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/opengl.go snapd-2.63+23.10/interfaces/builtin/opengl.go --- snapd-2.62+23.10/interfaces/builtin/opengl.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/opengl.go 2024-04-24 00:00:39.000000000 +0000 @@ -48,6 +48,9 @@ # libdrm data files /usr/share/libdrm/amdgpu.ids r, +# The nvidia container toolkit needs to traverse the top level libs directory +# in order to discover the libraries and generate a CDI config +/var/lib/snapd/hostfs/{,usr/}lib{,32,64,x32}/{,@{multiarch}/} r, # Bi-arch distribution nvidia support /var/lib/snapd/hostfs/{,usr/}lib{,32,64,x32}/{,@{multiarch}/}libcuda*.so{,.*} rm, /var/lib/snapd/hostfs/{,usr/}lib{,32,64,x32}/{,@{multiarch}/}libnvidia*.so{,.*} rm, diff -Nru snapd-2.62+23.10/interfaces/builtin/opengl_test.go snapd-2.63+23.10/interfaces/builtin/opengl_test.go --- snapd-2.62+23.10/interfaces/builtin/opengl_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/opengl_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -139,7 +139,7 @@ KERNEL=="dma_buf_te", TAG+="snap_consumer_app"`) c.Assert(spec.Snippets(), testutil.Contains, `# opengl KERNEL=="galcore", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *OpenglInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/optical_drive_test.go snapd-2.63+23.10/interfaces/builtin/optical_drive_test.go --- snapd-2.62+23.10/interfaces/builtin/optical_drive_test.go 2024-03-21 
20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/optical_drive_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -151,7 +151,7 @@ c.Assert(spec.Snippets(), HasLen, 12) // four rules multiplied by three apps c.Assert(spec.Snippets(), testutil.Contains, `# optical-drive KERNEL=="sr[0-9]*", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *OpticalDriveInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/password_manager_service.go snapd-2.63+23.10/interfaces/builtin/password_manager_service.go --- snapd-2.62+23.10/interfaces/builtin/password_manager_service.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/password_manager_service.go 2024-04-24 00:00:39.000000000 +0000 @@ -70,13 +70,13 @@ # dbus (receive, send) bus=session - path=/modules/kwalletd{,5} + path=/modules/kwalletd{,[56]} interface=org.freedesktop.DBus.* peer=(label=unconfined), dbus (receive, send) bus=session - path=/modules/kwalletd{,5} + path=/modules/kwalletd{,[56]} interface=org.kde.KWallet peer=(label=unconfined), ` diff -Nru snapd-2.62+23.10/interfaces/builtin/physical_memory_control_test.go snapd-2.63+23.10/interfaces/builtin/physical_memory_control_test.go --- snapd-2.62+23.10/interfaces/builtin/physical_memory_control_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/physical_memory_control_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -89,7 +89,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# physical-memory-control KERNEL=="mem", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *PhysicalMemoryControlInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/physical_memory_observe_test.go snapd-2.63+23.10/interfaces/builtin/physical_memory_observe_test.go --- snapd-2.62+23.10/interfaces/builtin/physical_memory_observe_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/physical_memory_observe_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -90,7 +90,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# physical-memory-observe KERNEL=="mem", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s 
*PhysicalMemoryObserveInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/polkit.go snapd-2.63+23.10/interfaces/builtin/polkit.go --- snapd-2.62+23.10/interfaces/builtin/polkit.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/polkit.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "bytes" "fmt" - "io/ioutil" + "os" "path" "path/filepath" "strings" @@ -96,7 +96,7 @@ } func loadPolkitPolicy(filename, actionPrefix string) (polkit.Policy, error) { - content, err := ioutil.ReadFile(filename) + content, err := os.ReadFile(filename) if err != nil { return nil, fmt.Errorf(`cannot read file %q: %v`, filename, err) } diff -Nru snapd-2.62+23.10/interfaces/builtin/posix_mq.go snapd-2.63+23.10/interfaces/builtin/posix_mq.go --- snapd-2.62+23.10/interfaces/builtin/posix_mq.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/posix_mq.go 2024-04-24 00:00:39.000000000 +0000 @@ -65,7 +65,9 @@ mq_unlink mq_notify mq_timedreceive +mq_timedreceive_time64 mq_timedsend +mq_timedsend_time64 ` var posixMQPlugPermissions = []string{ @@ -360,9 +362,11 @@ switch perm { case "read": syscalls = append(syscalls, "mq_timedreceive") + syscalls = append(syscalls, "mq_timedreceive_time64") syscalls = append(syscalls, "mq_notify") case "write": syscalls = append(syscalls, "mq_timedsend") + syscalls = append(syscalls, "mq_timedsend_time64") case "delete": syscalls = append(syscalls, "mq_unlink") } diff -Nru snapd-2.62+23.10/interfaces/builtin/posix_mq_test.go snapd-2.63+23.10/interfaces/builtin/posix_mq_test.go --- snapd-2.62+23.10/interfaces/builtin/posix_mq_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/posix_mq_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,6 +20,8 @@ package builtin_test import ( + "strings" + . 
"gopkg.in/check.v1" "github.com/snapcore/snapd/interfaces" @@ -444,14 +446,23 @@ s.testPathArrayPlug = interfaces.NewConnectedPlug(s.testPathArrayPlugInfo, nil, nil) } +// splitSnippet converts the trimmed string snippet to a string slice +func splitSnippet(snippet string) []string { + return strings.Split(strings.TrimSpace(snippet), "\n") +} + func (s *PosixMQInterfaceSuite) checkSlotSeccompSnippet(c *C, spec *seccomp.Specification) { slotSnippet := spec.SnippetForTag("snap.producer.app") + + c.Check(splitSnippet(slotSnippet), HasLen, 8) c.Check(slotSnippet, testutil.Contains, "mq_open") c.Check(slotSnippet, testutil.Contains, "mq_unlink") c.Check(slotSnippet, testutil.Contains, "mq_getsetattr") c.Check(slotSnippet, testutil.Contains, "mq_notify") c.Check(slotSnippet, testutil.Contains, "mq_timedreceive") + c.Check(slotSnippet, testutil.Contains, "mq_timedreceive_time64") c.Check(slotSnippet, testutil.Contains, "mq_timedsend") + c.Check(slotSnippet, testutil.Contains, "mq_timedsend_time64") } func (s *PosixMQInterfaceSuite) TestReadWriteMQAppArmor(c *C) { @@ -490,10 +501,13 @@ c.Assert(spec.SecurityTags(), DeepEquals, []string{"snap.consumer.app"}) plugSnippet := spec.SnippetForTag("snap.consumer.app") + c.Check(splitSnippet(plugSnippet), HasLen, 7) c.Check(plugSnippet, testutil.Contains, "mq_open") c.Check(plugSnippet, testutil.Contains, "mq_notify") c.Check(plugSnippet, testutil.Contains, "mq_timedreceive") + c.Check(plugSnippet, testutil.Contains, "mq_timedreceive_time64") c.Check(plugSnippet, testutil.Contains, "mq_timedsend") + c.Check(plugSnippet, testutil.Contains, "mq_timedsend_time64") c.Check(plugSnippet, testutil.Contains, "mq_getsetattr") c.Check(plugSnippet, Not(testutil.Contains), "mq_unlink") } @@ -533,10 +547,13 @@ c.Assert(spec.SecurityTags(), DeepEquals, []string{"snap.consumer.app"}) plugSnippet := spec.SnippetForTag("snap.consumer.app") + c.Check(splitSnippet(plugSnippet), HasLen, 7) c.Check(plugSnippet, testutil.Contains, "mq_open") c.Check(plugSnippet, testutil.Contains, "mq_notify") c.Check(plugSnippet, testutil.Contains, "mq_timedreceive") + c.Check(plugSnippet, testutil.Contains, "mq_timedreceive_time64") c.Check(plugSnippet, testutil.Contains, "mq_timedsend") + c.Check(plugSnippet, testutil.Contains, "mq_timedsend_time64") c.Check(plugSnippet, testutil.Contains, "mq_getsetattr") c.Check(plugSnippet, Not(testutil.Contains), "mq_unlink") } @@ -575,11 +592,14 @@ c.Assert(spec.SecurityTags(), DeepEquals, []string{"snap.consumer.app"}) plugSnippet := spec.SnippetForTag("snap.consumer.app") + c.Check(splitSnippet(plugSnippet), HasLen, 5) c.Check(plugSnippet, testutil.Contains, "mq_open") c.Check(plugSnippet, testutil.Contains, "mq_notify") c.Check(plugSnippet, testutil.Contains, "mq_timedreceive") + c.Check(plugSnippet, testutil.Contains, "mq_timedreceive_time64") c.Check(plugSnippet, testutil.Contains, "mq_getsetattr") c.Check(plugSnippet, Not(testutil.Contains), "mq_timedsend") + c.Check(plugSnippet, Not(testutil.Contains), "mq_timedsend_time64") c.Check(plugSnippet, Not(testutil.Contains), "mq_unlink") } @@ -623,10 +643,13 @@ c.Assert(spec.SecurityTags(), DeepEquals, []string{"snap.consumer.app"}) plugSnippet := spec.SnippetForTag("snap.consumer.app") + c.Check(splitSnippet(plugSnippet), HasLen, 7) c.Check(plugSnippet, testutil.Contains, "mq_open") c.Check(plugSnippet, testutil.Contains, "mq_notify") c.Check(plugSnippet, testutil.Contains, "mq_timedreceive") + c.Check(plugSnippet, testutil.Contains, "mq_timedreceive_time64") c.Check(plugSnippet, testutil.Contains, 
"mq_timedsend") + c.Check(plugSnippet, testutil.Contains, "mq_timedsend_time64") c.Check(plugSnippet, testutil.Contains, "mq_getsetattr") c.Check(plugSnippet, Not(testutil.Contains), "mq_unlink") } @@ -664,12 +687,15 @@ c.Assert(spec.SecurityTags(), DeepEquals, []string{"snap.consumer.app"}) plugSnippet := spec.SnippetForTag("snap.consumer.app") + c.Check(splitSnippet(plugSnippet), HasLen, 8) c.Check(plugSnippet, testutil.Contains, "mq_open") c.Check(plugSnippet, testutil.Contains, "mq_unlink") c.Check(plugSnippet, testutil.Contains, "mq_getsetattr") c.Check(plugSnippet, testutil.Contains, "mq_notify") c.Check(plugSnippet, testutil.Contains, "mq_timedreceive") + c.Check(plugSnippet, testutil.Contains, "mq_timedreceive_time64") c.Check(plugSnippet, testutil.Contains, "mq_timedsend") + c.Check(plugSnippet, testutil.Contains, "mq_timedsend_time64") } func (s *PosixMQInterfaceSuite) TestPathValidationPosixMQ(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/ppp_test.go snapd-2.63+23.10/interfaces/builtin/ppp_test.go --- snapd-2.62+23.10/interfaces/builtin/ppp_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/ppp_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -98,7 +98,7 @@ c.Assert(spec.Snippets(), HasLen, 3) c.Assert(spec.Snippets(), testutil.Contains, `# ppp KERNEL=="ppp", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *PppInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/ptp_test.go snapd-2.63+23.10/interfaces/builtin/ptp_test.go --- snapd-2.62+23.10/interfaces/builtin/ptp_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/ptp_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -89,7 +89,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# ptp SUBSYSTEM=="ptp", KERNEL=="ptp[0-9]*", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *PTPInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/pulseaudio_test.go snapd-2.63+23.10/interfaces/builtin/pulseaudio_test.go --- snapd-2.62+23.10/interfaces/builtin/pulseaudio_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/pulseaudio_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -162,7 +162,7 @@ KERNEL=="pcmC[0-9]*D[0-9]*[cp]", TAG+="snap_pulseaudio_app1"`) c.Assert(spec.Snippets(), testutil.Contains, `# pulseaudio KERNEL=="timer", TAG+="snap_pulseaudio_app1"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_pulseaudio_app1", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_pulseaudio_app1"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), 
testutil.Contains, fmt.Sprintf(`TAG=="snap_pulseaudio_app1", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_pulseaudio_app1 $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *PulseAudioInterfaceSuite) TestInterfaces(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/raw_input_test.go snapd-2.63+23.10/interfaces/builtin/raw_input_test.go --- snapd-2.62+23.10/interfaces/builtin/raw_input_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/raw_input_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -107,7 +107,7 @@ KERNEL=="mouse[0-9]*", TAG+="snap_consumer_app"`) c.Assert(spec.Snippets(), testutil.Contains, `# raw-input KERNEL=="ts[0-9]*", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) c.Assert(spec.TriggeredSubsystems(), DeepEquals, []string{"input"}) } diff -Nru snapd-2.62+23.10/interfaces/builtin/raw_usb_test.go snapd-2.63+23.10/interfaces/builtin/raw_usb_test.go --- snapd-2.62+23.10/interfaces/builtin/raw_usb_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/raw_usb_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -101,7 +101,7 @@ SUBSYSTEM=="usbmisc", TAG+="snap_consumer_app"`) c.Assert(spec.Snippets(), testutil.Contains, `# raw-usb SUBSYSTEM=="tty", ENV{ID_BUS}=="usb", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *RawUsbInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/raw_volume_test.go snapd-2.63+23.10/interfaces/builtin/raw_volume_test.go --- snapd-2.62+23.10/interfaces/builtin/raw_volume_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/raw_volume_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -298,7 +298,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets()[0], Equals, `# raw-volume KERNEL=="vda1", TAG+="snap_client-snap_app-accessing-1-part"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-1-part", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-1-part"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-1-part", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-1-part $devpath $major:$minor"`, dirs.DistroLibExecDir)) spec = udev.NewSpecification(interfaces.NewSnapAppSet(s.testPlugPart2.Snap())) c.Assert(spec.AddConnectedPlug(s.iface, s.testPlugPart2, s.testUDev2), IsNil) diff -Nru snapd-2.62+23.10/interfaces/builtin/scsi_generic_test.go snapd-2.63+23.10/interfaces/builtin/scsi_generic_test.go --- 
snapd-2.62+23.10/interfaces/builtin/scsi_generic_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/scsi_generic_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -92,7 +92,7 @@ c.Assert(udevSpec.Snippets(), HasLen, 2) c.Assert(udevSpec.Snippets(), testutil.Contains, `# scsi-generic KERNEL=="sg[0-9]*", TAG+="snap_other_app"`) - c.Assert(udevSpec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_other_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_other_app"`, dirs.DistroLibExecDir)) + c.Assert(udevSpec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_other_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_other_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *ScsiGenericInterfaceSuite) TestInterfaces(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/sd_control_test.go snapd-2.63+23.10/interfaces/builtin/sd_control_test.go --- snapd-2.62+23.10/interfaces/builtin/sd_control_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/sd_control_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -106,7 +106,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# sd-control KERNEL=="DualSD", TAG+="snap_my-device_svc"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_my-device_svc", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_my-device_svc"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_my-device_svc", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_my-device_svc $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *sdControlSuite) TestUDevConnectedPlugNoFlavor(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/serial_port_test.go snapd-2.63+23.10/interfaces/builtin/serial_port_test.go --- snapd-2.62+23.10/interfaces/builtin/serial_port_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/serial_port_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -506,7 +506,7 @@ SUBSYSTEM=="tty", KERNEL=="ttyS0", TAG+="snap_client-snap_app-accessing-2-ports"` c.Assert(snippet, Equals, expectedSnippet1) extraSnippet := spec.Snippets()[1] - expectedExtraSnippet1 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-ports", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-2-ports"`, dirs.DistroLibExecDir) + expectedExtraSnippet1 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-ports", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-2-ports $devpath $major:$minor"`, dirs.DistroLibExecDir) c.Assert(extraSnippet, Equals, expectedExtraSnippet1) // add plug for the first slot with product and vendor ids @@ -520,7 +520,7 @@ SUBSYSTEM=="tty", SUBSYSTEMS=="usb", ATTRS{idVendor}=="0001", ATTRS{idProduct}=="0001", TAG+="snap_client-snap_app-accessing-2-ports"` c.Assert(snippet, Equals, expectedSnippet2) extraSnippet = spec.Snippets()[1] - expectedExtraSnippet2 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-ports", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-2-ports"`, dirs.DistroLibExecDir) + expectedExtraSnippet2 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-ports", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", 
RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-2-ports $devpath $major:$minor"`, dirs.DistroLibExecDir) c.Assert(extraSnippet, Equals, expectedExtraSnippet2) // add plug for the first slot with product and vendor ids @@ -534,7 +534,7 @@ SUBSYSTEM=="tty", SUBSYSTEMS=="usb", ATTRS{idVendor}=="ffff", ATTRS{idProduct}=="ffff", TAG+="snap_client-snap_app-accessing-2-ports"` c.Assert(snippet, Equals, expectedSnippet3) extraSnippet = spec.Snippets()[1] - expectedExtraSnippet3 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-ports", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-2-ports"`, dirs.DistroLibExecDir) + expectedExtraSnippet3 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-ports", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-2-ports $devpath $major:$minor"`, dirs.DistroLibExecDir) c.Assert(extraSnippet, Equals, expectedExtraSnippet3) // add plug for the first slot with product and vendor ids and usb interface number @@ -548,7 +548,7 @@ SUBSYSTEM=="tty", SUBSYSTEMS=="usb", ATTRS{idVendor}=="abcd", ATTRS{idProduct}=="1234", ENV{ID_USB_INTERFACE_NUM}=="00", TAG+="snap_client-snap_app-accessing-2-ports"` c.Assert(snippet, Equals, expectedSnippet4) extraSnippet = spec.Snippets()[1] - expectedExtraSnippet4 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-ports", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-2-ports"`, dirs.DistroLibExecDir) + expectedExtraSnippet4 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-2-ports", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-2-ports $devpath $major:$minor"`, dirs.DistroLibExecDir) c.Assert(extraSnippet, Equals, expectedExtraSnippet4) } @@ -627,80 +627,80 @@ // these have only path expectedSnippet1 := `# serial-port SUBSYSTEM=="tty", KERNEL=="ttyS0", TAG+="snap_client-snap_app-accessing-3rd-port"` - expectedExtraSnippet1 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-3rd-port"`, dirs.DistroLibExecDir) + expectedExtraSnippet1 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-3rd-port $devpath $major:$minor"`, dirs.DistroLibExecDir) checkConnectedPlugSnippet(s.testPlugPort3, s.testSlot1, expectedSnippet1, expectedExtraSnippet1) expectedSnippet2 := `# serial-port SUBSYSTEM=="tty", KERNEL=="ttyUSB927", TAG+="snap_client-snap_app-accessing-3rd-port"` - expectedExtraSnippet2 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-3rd-port"`, dirs.DistroLibExecDir) + expectedExtraSnippet2 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-3rd-port $devpath $major:$minor"`, dirs.DistroLibExecDir) checkConnectedPlugSnippet(s.testPlugPort3, s.testSlot2, expectedSnippet2, expectedExtraSnippet2) expectedSnippet3 := `# serial-port SUBSYSTEM=="tty", KERNEL=="ttyS42", TAG+="snap_client-snap_app-accessing-3rd-port"` - expectedExtraSnippet3 := 
fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-3rd-port"`, dirs.DistroLibExecDir) + expectedExtraSnippet3 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-3rd-port $devpath $major:$minor"`, dirs.DistroLibExecDir) checkConnectedPlugSnippet(s.testPlugPort3, s.testSlot3, expectedSnippet3, expectedExtraSnippet3) expectedSnippet4 := `# serial-port SUBSYSTEM=="tty", KERNEL=="ttyO0", TAG+="snap_client-snap_app-accessing-3rd-port"` - expectedExtraSnippet4 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-3rd-port"`, dirs.DistroLibExecDir) + expectedExtraSnippet4 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-3rd-port $devpath $major:$minor"`, dirs.DistroLibExecDir) checkConnectedPlugSnippet(s.testPlugPort3, s.testSlot4, expectedSnippet4, expectedExtraSnippet4) expectedSnippet5 := `# serial-port SUBSYSTEM=="tty", KERNEL=="ttyACM0", TAG+="snap_client-snap_app-accessing-3rd-port"` - expectedExtraSnippet5 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-3rd-port"`, dirs.DistroLibExecDir) + expectedExtraSnippet5 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-3rd-port $devpath $major:$minor"`, dirs.DistroLibExecDir) checkConnectedPlugSnippet(s.testPlugPort3, s.testSlot5, expectedSnippet5, expectedExtraSnippet5) expectedSnippet6 := `# serial-port SUBSYSTEM=="tty", KERNEL=="ttyAMA0", TAG+="snap_client-snap_app-accessing-3rd-port"` - expectedExtraSnippet6 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-3rd-port"`, dirs.DistroLibExecDir) + expectedExtraSnippet6 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-3rd-port $devpath $major:$minor"`, dirs.DistroLibExecDir) checkConnectedPlugSnippet(s.testPlugPort3, s.testSlot6, expectedSnippet6, expectedExtraSnippet6) expectedSnippet7 := `# serial-port SUBSYSTEM=="tty", KERNEL=="ttyXRUSB0", TAG+="snap_client-snap_app-accessing-3rd-port"` - expectedExtraSnippet7 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-3rd-port"`, dirs.DistroLibExecDir) + expectedExtraSnippet7 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-3rd-port $devpath $major:$minor"`, dirs.DistroLibExecDir) checkConnectedPlugSnippet(s.testPlugPort3, s.testSlot7, expectedSnippet7, expectedExtraSnippet7) expectedSnippet8 := `# serial-port SUBSYSTEM=="tty", KERNEL=="ttymxc2", TAG+="snap_client-snap_app-accessing-3rd-port"` - expectedExtraSnippet8 := 
fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-3rd-port"`, dirs.DistroLibExecDir) + expectedExtraSnippet8 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-3rd-port $devpath $major:$minor"`, dirs.DistroLibExecDir) checkConnectedPlugSnippet(s.testPlugPort3, s.testSlot8, expectedSnippet8, expectedExtraSnippet8) expectedSnippet9 := `# serial-port SUBSYSTEM=="tty", KERNEL=="ttySC0", TAG+="snap_client-snap_app-accessing-3rd-port"` - expectedExtraSnippet9 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-3rd-port"`, dirs.DistroLibExecDir) + expectedExtraSnippet9 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-3rd-port $devpath $major:$minor"`, dirs.DistroLibExecDir) checkConnectedPlugSnippet(s.testPlugPort3, s.testSlot9, expectedSnippet9, expectedExtraSnippet9) expectedSnippet10 := `# serial-port SUBSYSTEM=="tty", KERNEL=="ttyMSM0", TAG+="snap_client-snap_app-accessing-3rd-port"` - expectedExtraSnippet10 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-3rd-port"`, dirs.DistroLibExecDir) + expectedExtraSnippet10 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-3rd-port $devpath $major:$minor"`, dirs.DistroLibExecDir) checkConnectedPlugSnippet(s.testPlugPort3, s.testSlot10, expectedSnippet10, expectedExtraSnippet10) expectedSnippet11 := `# serial-port SUBSYSTEM=="tty", KERNEL=="ttyHS0", TAG+="snap_client-snap_app-accessing-3rd-port"` - expectedExtraSnippet11 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-3rd-port"`, dirs.DistroLibExecDir) + expectedExtraSnippet11 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-3rd-port $devpath $major:$minor"`, dirs.DistroLibExecDir) checkConnectedPlugSnippet(s.testPlugPort3, s.testSlot11, expectedSnippet11, expectedExtraSnippet11) expectedSnippet12 := `# serial-port SUBSYSTEM=="tty", KERNEL=="ttyGS0", TAG+="snap_client-snap_app-accessing-3rd-port"` - expectedExtraSnippet12 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-3rd-port"`, dirs.DistroLibExecDir) + expectedExtraSnippet12 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-3rd-port $devpath $major:$minor"`, dirs.DistroLibExecDir) checkConnectedPlugSnippet(s.testPlugPort3, s.testSlot12, expectedSnippet12, expectedExtraSnippet12) expectedSnippet13 := `# serial-port SUBSYSTEM=="tty", KERNEL=="ttyLP0", TAG+="snap_client-snap_app-accessing-3rd-port"` - expectedExtraSnippet13 := 
fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-3rd-port"`, dirs.DistroLibExecDir) + expectedExtraSnippet13 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-3rd-port $devpath $major:$minor"`, dirs.DistroLibExecDir) checkConnectedPlugSnippet(s.testPlugPort3, s.testSlot13, expectedSnippet13, expectedExtraSnippet13) // these have product and vendor ids expectedSnippet100 := `# serial-port IMPORT{builtin}="usb_id" SUBSYSTEM=="tty", SUBSYSTEMS=="usb", ATTRS{idVendor}=="0001", ATTRS{idProduct}=="0001", TAG+="snap_client-snap_app-accessing-3rd-port"` - expectedExtraSnippet100 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-3rd-port"`, dirs.DistroLibExecDir) + expectedExtraSnippet100 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-3rd-port $devpath $major:$minor"`, dirs.DistroLibExecDir) checkConnectedPlugSnippet(s.testPlugPort3, s.testUDev1, expectedSnippet100, expectedExtraSnippet100) expectedSnippet101 := `# serial-port IMPORT{builtin}="usb_id" SUBSYSTEM=="tty", SUBSYSTEMS=="usb", ATTRS{idVendor}=="ffff", ATTRS{idProduct}=="ffff", TAG+="snap_client-snap_app-accessing-3rd-port"` - expectedExtraSnippet101 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_client-snap_app-accessing-3rd-port"`, dirs.DistroLibExecDir) + expectedExtraSnippet101 := fmt.Sprintf(`TAG=="snap_client-snap_app-accessing-3rd-port", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_client-snap_app-accessing-3rd-port $devpath $major:$minor"`, dirs.DistroLibExecDir) checkConnectedPlugSnippet(s.testPlugPort3, s.testUDev2, expectedSnippet101, expectedExtraSnippet101) } diff -Nru snapd-2.62+23.10/interfaces/builtin/spi_test.go snapd-2.63+23.10/interfaces/builtin/spi_test.go --- snapd-2.62+23.10/interfaces/builtin/spi_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/spi_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -216,7 +216,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# spi KERNEL=="spidev0.0", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *spiInterfaceSuite) TestAppArmorSpec(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/system_observe.go snapd-2.63+23.10/interfaces/builtin/system_observe.go --- snapd-2.62+23.10/interfaces/builtin/system_observe.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/system_observe.go 2024-04-24 00:00:39.000000000 +0000 @@ -156,6 +156,25 @@ member=GetMachineId peer=(label=unconfined), +# Allow clients to get properties of systemd (the 
manager) and +# units +dbus (send) + bus=system + interface=org.freedesktop.DBus.Properties + path=/org/freedesktop/systemd1{,/**} + member=Get{,All} + peer=(label=unconfined), + +# Allow clients to explicitly list units with some of their details (path, +# status) and get unit path, see +# https://www.freedesktop.org/wiki/Software/systemd/dbus/ for details +dbus (send) + bus=system + path=/org/freedesktop/systemd1 + interface=org.freedesktop.systemd1.Manager + member={GetUnit,ListUnits} + peer=(label=unconfined), + # Allow reading if protected hardlinks are enabled, but don't allow enabling or # disabling them @{PROC}/sys/fs/protected_hardlinks r, diff -Nru snapd-2.62+23.10/interfaces/builtin/tee_test.go snapd-2.63+23.10/interfaces/builtin/tee_test.go --- snapd-2.62+23.10/interfaces/builtin/tee_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/tee_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -95,7 +95,7 @@ c.Assert(spec.Snippets(), testutil.Contains, `# tee KERNEL=="qseecom", TAG+="snap_consumer_app"`) c.Assert(spec.Snippets(), testutil.Contains, - fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *TeeInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/time_control_test.go snapd-2.63+23.10/interfaces/builtin/time_control_test.go --- snapd-2.62+23.10/interfaces/builtin/time_control_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/time_control_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -111,7 +111,7 @@ SUBSYSTEM=="rtc", TAG+="snap_consumer_app"`) c.Assert(spec.Snippets(), testutil.Contains, `# time-control KERNEL=="pps[0-9]*", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *TimeControlInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/tpm_test.go snapd-2.63+23.10/interfaces/builtin/tpm_test.go --- snapd-2.62+23.10/interfaces/builtin/tpm_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/tpm_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -91,7 +91,7 @@ KERNEL=="tpm[0-9]*", TAG+="snap_consumer_app"`) c.Assert(spec.Snippets(), testutil.Contains, `# tpm KERNEL=="tpmrm[0-9]*", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *TpmInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/u2f_devices_test.go 
snapd-2.63+23.10/interfaces/builtin/u2f_devices_test.go --- snapd-2.62+23.10/interfaces/builtin/u2f_devices_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/u2f_devices_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -93,7 +93,7 @@ c.Assert(spec.Snippets(), testutil.Contains, `# u2f-devices # Yubico YubiKey SUBSYSTEM=="hidraw", KERNEL=="hidraw*", ATTRS{idVendor}=="1050", ATTRS{idProduct}=="0113|0114|0115|0116|0120|0121|0200|0402|0403|0406|0407|0410", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *u2fDevicesInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/udisks2.go snapd-2.63+23.10/interfaces/builtin/udisks2.go --- snapd-2.62+23.10/interfaces/builtin/udisks2.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/udisks2.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "os" "path/filepath" "strings" @@ -445,7 +445,7 @@ if err != nil { return fmt.Errorf("cannot resolve udev-file: %v", err) } - data, err := ioutil.ReadFile(filepath.Join(mountDir, resolvedPath)) + data, err := os.ReadFile(filepath.Join(mountDir, resolvedPath)) if err != nil { return fmt.Errorf("cannot open udev-file: %v", err) } diff -Nru snapd-2.62+23.10/interfaces/builtin/udisks2_test.go snapd-2.63+23.10/interfaces/builtin/udisks2_test.go --- snapd-2.62+23.10/interfaces/builtin/udisks2_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/udisks2_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -262,7 +262,7 @@ SUBSYSTEM=="block", TAG+="snap_producer_app"`) c.Assert(spec.Snippets(), testutil.Contains, `# udisks2 SUBSYSTEM=="usb", TAG+="snap_producer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_producer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_producer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_producer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_producer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *UDisks2InterfaceSuite) TestUDevSpecFile(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/uinput_test.go snapd-2.63+23.10/interfaces/builtin/uinput_test.go --- snapd-2.62+23.10/interfaces/builtin/uinput_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/uinput_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -94,7 +94,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets()[0], Equals, `# uinput KERNEL=="uinput", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s 
*uinputInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/uio_test.go snapd-2.63+23.10/interfaces/builtin/uio_test.go --- snapd-2.62+23.10/interfaces/builtin/uio_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/uio_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -107,7 +107,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# uio SUBSYSTEM=="uio", KERNEL=="uio0", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *uioInterfaceSuite) TestAppArmorConnectedPlugIgnoresMissingConfigFile(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/unity7.go snapd-2.63+23.10/interfaces/builtin/unity7.go --- snapd-2.62+23.10/interfaces/builtin/unity7.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/unity7.go 2024-04-24 00:00:39.000000000 +0000 @@ -362,35 +362,35 @@ path=/{MenuBar{,/[0-9A-F]*},com/canonical/{menu/[0-9A-F]*,dbusmenu}} interface=com.canonical.dbusmenu member="{LayoutUpdated,ItemsPropertiesUpdated}" - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), dbus (receive) bus=session path=/{MenuBar{,/[0-9A-F]*},com/canonical/{menu/[0-9A-F]*,dbusmenu}} interface="{com.canonical.dbusmenu,org.freedesktop.DBus.Properties}" member=Get* - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), dbus (receive) bus=session path=/{MenuBar{,/[0-9A-F]*},com/canonical/{menu/[0-9A-F]*,dbusmenu}} interface=com.canonical.dbusmenu member="{AboutTo*,Event*}" - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), dbus (receive) bus=session path=/{MenuBar{,/[0-9A-F]*},com/canonical/{menu/[0-9A-F]*,dbusmenu}} interface=org.freedesktop.DBus.Introspectable member=Introspect - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), dbus (receive) bus=session path=/com/canonical/dbusmenu interface=org.freedesktop.DBus.Properties member=Get* - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), # app-indicators dbus (send) @@ -423,35 +423,35 @@ path=/{StatusNotifierWatcher,org/ayatana/NotificationItem/*} interface=org.kde.StatusNotifierWatcher member=RegisterStatusNotifierItem - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), dbus (send) bus=session path=/{StatusNotifierItem,org/ayatana/NotificationItem/*} interface=org.kde.StatusNotifierItem member="New{AttentionIcon,Icon,IconThemePath,OverlayIcon,Status,Title,ToolTip}" - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), dbus (receive) bus=session path=/{StatusNotifierItem,org/ayatana/NotificationItem/*} interface=org.kde.StatusNotifierItem member={Activate,ContextMenu,Scroll,SecondaryActivate,ProvideXdgActivationToken,XAyatanaSecondaryActivate} - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), dbus (send) bus=session path=/{StatusNotifierItem/menu,org/ayatana/NotificationItem/*/Menu} interface=com.canonical.dbusmenu member="{LayoutUpdated,ItemsPropertiesUpdated}" - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), dbus (receive) bus=session 
path=/{StatusNotifierItem,StatusNotifierItem/menu,org/ayatana/NotificationItem/**} interface={org.freedesktop.DBus.Properties,com.canonical.dbusmenu} member={Get*,AboutTo*,Event*} - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), # notifications dbus (send) @@ -459,14 +459,14 @@ path=/org/freedesktop/Notifications interface=org.freedesktop.Notifications member="{GetCapabilities,GetServerInformation,Notify,CloseNotification}" - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), dbus (receive) bus=session path=/org/freedesktop/Notifications interface=org.freedesktop.Notifications member={ActionInvoked,NotificationClosed,NotificationReplied} - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), # KDE Plasma's Inhibited property indicating "do not disturb" mode # https://invent.kde.org/plasma/plasma-workspace/-/blob/master/libnotificationmanager/dbus/org.freedesktop.Notifications.xml#L42 @@ -475,21 +475,21 @@ path=/org/freedesktop/Notifications interface=org.freedesktop.DBus.Properties member="Get{,All}" - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), dbus (receive) bus=session path=/org/freedesktop/Notifications interface=org.freedesktop.DBus.Properties member=PropertiesChanged - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), dbus (send) bus=session path=/org/ayatana/NotificationItem/* interface=org.kde.StatusNotifierItem member=XAyatanaNew* - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), # unity launcher dbus (send) @@ -591,7 +591,7 @@ path=/com/canonical/menu/[0-9]* interface="{org.freedesktop.DBus.Properties,com.canonical.dbusmenu}" member="{GetAll,GetLayout}" - peer=(label=unconfined), + peer=(label="{plasmashell,unconfined}"), # Allow requesting interest in receiving media key events. 
This tells Gnome # settings that our application should be notified when key events we are diff -Nru snapd-2.62+23.10/interfaces/builtin/vcio_test.go snapd-2.63+23.10/interfaces/builtin/vcio_test.go --- snapd-2.62+23.10/interfaces/builtin/vcio_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/vcio_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -89,7 +89,7 @@ c.Assert(spec.Snippets(), HasLen, 2) c.Assert(spec.Snippets(), testutil.Contains, `# vcio SUBSYSTEM=="bcm2708_vcio", KERNEL=="vcio", TAG+="snap_consumer_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *VcioInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/builtin/wayland_test.go snapd-2.63+23.10/interfaces/builtin/wayland_test.go --- snapd-2.62+23.10/interfaces/builtin/wayland_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/wayland_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -184,7 +184,7 @@ KERNEL=="ts[0-9]*", TAG+="snap_wayland_app1"`) c.Assert(spec.Snippets(), testutil.Contains, `# wayland KERNEL=="tty[0-9]*", TAG+="snap_wayland_app1"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_wayland_app1", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_wayland_app1"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_wayland_app1", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_wayland_app1 $devpath $major:$minor"`, dirs.DistroLibExecDir)) c.Assert(spec.TriggeredSubsystems(), DeepEquals, []string{"input"}) } diff -Nru snapd-2.62+23.10/interfaces/builtin/x11_test.go snapd-2.63+23.10/interfaces/builtin/x11_test.go --- snapd-2.62+23.10/interfaces/builtin/x11_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/x11_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -254,7 +254,7 @@ KERNEL=="ts[0-9]*", TAG+="snap_x11_app"`) c.Assert(spec.Snippets(), testutil.Contains, `# x11 KERNEL=="tty[0-9]*", TAG+="snap_x11_app"`) - c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_x11_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_x11_app"`, dirs.DistroLibExecDir)) + c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf(`TAG=="snap_x11_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_x11_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) c.Assert(spec.TriggeredSubsystems(), DeepEquals, []string{"input"}) // on a classic system with x11 slot coming from the core snap. 
diff -Nru snapd-2.62+23.10/interfaces/builtin/xilinx_dma_test.go snapd-2.63+23.10/interfaces/builtin/xilinx_dma_test.go --- snapd-2.62+23.10/interfaces/builtin/xilinx_dma_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/builtin/xilinx_dma_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -93,7 +93,7 @@ c.Assert(spec.Snippets(), testutil.Contains, `# xilinx-dma SUBSYSTEM=="xdma", TAG+="snap_consumer_app"`) c.Assert(spec.Snippets(), testutil.Contains, fmt.Sprintf( - `TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper snap_consumer_app"`, dirs.DistroLibExecDir)) + `TAG=="snap_consumer_app", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%v/snap-device-helper $env{ACTION} snap_consumer_app $devpath $major:$minor"`, dirs.DistroLibExecDir)) } func (s *XilinxDmaInterfaceSuite) TestStaticInfo(c *C) { diff -Nru snapd-2.62+23.10/interfaces/export_test.go snapd-2.63+23.10/interfaces/export_test.go --- snapd-2.62+23.10/interfaces/export_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/export_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -43,12 +43,12 @@ func (c ByInterfaceName) Swap(i, j int) { byInterfaceName(c).Swap(i, j) } func (c ByInterfaceName) Less(i, j int) bool { return byInterfaceName(c).Less(i, j) } -// MockIsHomeUsingNFS mocks the real implementation of osutil.IsHomeUsingNFS -func MockIsHomeUsingNFS(new func() (bool, error)) (restore func()) { - old := isHomeUsingNFS - isHomeUsingNFS = new +// MockIsHomeUsingRemoteFS mocks the real implementation of osutil.IsHomeUsingRemoteFS +func MockIsHomeUsingRemoteFS(new func() (bool, error)) (restore func()) { + old := isHomeUsingRemoteFS + isHomeUsingRemoteFS = new return func() { - isHomeUsingNFS = old + isHomeUsingRemoteFS = old } } diff -Nru snapd-2.62+23.10/interfaces/mount/backend_test.go snapd-2.63+23.10/interfaces/mount/backend_test.go --- snapd-2.62+23.10/interfaces/mount/backend_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/mount/backend_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -158,14 +157,14 @@ expected := strings.Split(fmt.Sprintf("%s\n%s\n", fsEntry1, fsEntry2), "\n") // and that we have the modern fstab file (global for snap) fn := filepath.Join(dirs.SnapMountPolicyDir, "snap.snap-name.fstab") - content, err := ioutil.ReadFile(fn) + content, err := os.ReadFile(fn) c.Assert(err, IsNil, Commentf("Expected mount profile for the whole snap")) got := strings.Split(string(content), "\n") c.Check(got, testutil.DeepUnsortedMatches, expected) // Check that the user-fstab file was written with the user mount fn = filepath.Join(dirs.SnapMountPolicyDir, "snap.snap-name.user-fstab") - content, err = ioutil.ReadFile(fn) + content, err = os.ReadFile(fn) c.Assert(err, IsNil, Commentf("Expected user mount profile for the whole snap")) c.Check(string(content), Equals, fsEntry3.String()+"\n") } @@ -276,7 +275,7 @@ expected := strings.Split(fmt.Sprintf("%s\n%s\n", fsEntry1, fsEntry2), "\n") // and that we have the modern fstab file (global for snap) fn := filepath.Join(dirs.SnapMountPolicyDir, "snap.snap-name.fstab") - content, err := ioutil.ReadFile(fn) + content, err := os.ReadFile(fn) c.Assert(err, IsNil, Commentf("Expected mount profile for the whole snap")) got := strings.Split(string(content), "\n") c.Check(got, testutil.DeepUnsortedMatches, expected) @@ -297,7 +296,7 @@ // (because mount profiles are global in the whole snap) expected 
= strings.Split(fmt.Sprintf("%s\n%s\n%s\n", fsEntry1, fsEntry2, fsEntry3), "\n") // and that we have the modern fstab file (global for snap) - content, err = ioutil.ReadFile(fn) + content, err = os.ReadFile(fn) c.Assert(err, IsNil, Commentf("Expected mount profile for the whole snap")) got = strings.Split(string(content), "\n") c.Check(got, testutil.DeepUnsortedMatches, expected) @@ -348,7 +347,7 @@ expected := strings.Split(fmt.Sprintf("%s\n%s\n%s\n", fsEntry1, fsEntry2, fsEntry3), "\n") // and that we have the modern fstab file (global for snap) fn := filepath.Join(dirs.SnapMountPolicyDir, "snap.snap-name.fstab") - content, err := ioutil.ReadFile(fn) + content, err := os.ReadFile(fn) c.Assert(err, IsNil, Commentf("Expected mount profile for the whole snap")) got := strings.Split(string(content), "\n") c.Check(got, testutil.DeepUnsortedMatches, expected) @@ -417,7 +416,7 @@ expected := strings.Split(fmt.Sprintf("%s\n%s\n%s\n", fsEntry1, fsEntry2, fsEntry3), "\n") // and that we have the modern fstab file (global for snap) fn := filepath.Join(dirs.SnapMountPolicyDir, "snap.snap-name.fstab") - content, err := ioutil.ReadFile(fn) + content, err := os.ReadFile(fn) c.Assert(err, IsNil, Commentf("Expected mount profile for the whole snap")) got := strings.Split(string(content), "\n") c.Check(got, testutil.DeepUnsortedMatches, expected) diff -Nru snapd-2.62+23.10/interfaces/seccomp/backend.go snapd-2.63+23.10/interfaces/seccomp/backend.go --- snapd-2.62+23.10/interfaces/seccomp/backend.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/seccomp/backend.go 2024-04-24 00:00:39.000000000 +0000 @@ -79,6 +79,16 @@ versionInfo seccomp.VersionInfo } +// TODO: now that snap-seccomp has full support for deny-listing this +// should be replaced with something like: +// +// ~ioctl - 4294967295|TIOCSTI +// ~ioctl - 4294967295|TIOCLINUX +// +// in the default template. 
This requires that MaskedEq learns +// to deal with two arguments (see also https://github.com/snapcore/snapd/compare/master...mvo5:rework-seccomp-denylist-incoperate-global.bin?expand=1) +// +// globalProfileLE is generated via cmd/snap-seccomp-black list var globalProfileLE = []byte{ 0x20, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x04, 0x3e, 0x00, 0x00, 0xc0, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x40, @@ -92,6 +102,7 @@ 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x7f, } +// globalProfileBE is generated via cmd/snap-seccomp-black list var globalProfileBE = []byte{ 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x15, 0x00, 0x08, 0x80, 0x00, 0x00, 0x16, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x06, 0x00, 0x00, 0x00, 0x36, @@ -148,7 +159,7 @@ } func bpfBinPath(srcName string) string { - return filepath.Join(dirs.SnapSeccompDir, strings.TrimSuffix(srcName, ".src")+".bin") + return filepath.Join(dirs.SnapSeccompDir, strings.TrimSuffix(srcName, ".src")+".bin2") } func parallelCompile(compiler Compiler, profiles []string) error { diff -Nru snapd-2.62+23.10/interfaces/seccomp/backend_test.go snapd-2.63+23.10/interfaces/seccomp/backend_test.go --- snapd-2.62+23.10/interfaces/seccomp/backend_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/seccomp/backend_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "encoding/binary" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "runtime" @@ -133,7 +132,7 @@ c.Check(err, IsNil) // and got compiled c.Check(s.snapSeccomp.Calls(), DeepEquals, [][]string{ - {"snap-seccomp", "compile", profile + ".src", profile + ".bin"}, + {"snap-seccomp", "compile", profile + ".src", profile + ".bin2"}, }) } @@ -146,7 +145,7 @@ c.Check(err, IsNil) // and got compiled c.Check(s.snapSeccomp.Calls(), DeepEquals, [][]string{ - {"snap-seccomp", "compile", profile + ".src", profile + ".bin"}, + {"snap-seccomp", "compile", profile + ".src", profile + ".bin2"}, }) } @@ -178,9 +177,9 @@ // ensure the snap-seccomp from the core snap was used instead c.Check(snapSeccompOnCore.Calls(), DeepEquals, [][]string{ {"snap-seccomp", "version-info"}, // from Initialize() - {"snap-seccomp", "compile", profile + ".src", profile + ".bin"}, + {"snap-seccomp", "compile", profile + ".src", profile + ".bin2"}, }) - raw, err := ioutil.ReadFile(profile + ".src") + raw, err := os.ReadFile(profile + ".src") c.Assert(err, IsNil) c.Assert(bytes.HasPrefix(raw, []byte(`# snap-seccomp version information: # 2345cdef 2.3.4 2345cdef - @@ -219,7 +218,7 @@ // file called "snap.sambda.nmbd" was created c.Check(err, IsNil) // and got compiled - c.Check(s.snapSeccomp.Calls(), testutil.DeepContains, []string{"snap-seccomp", "compile", profile + ".src", profile + ".bin"}) + c.Check(s.snapSeccomp.Calls(), testutil.DeepContains, []string{"snap-seccomp", "compile", profile + ".src", profile + ".bin2"}) s.snapSeccomp.ForgetCalls() s.RemoveSnap(c, snapInfo) @@ -236,7 +235,7 @@ // Verify that profile "snap.samba.hook.configure" was created. 
c.Check(err, IsNil) // and got compiled - c.Check(s.snapSeccomp.Calls(), testutil.DeepContains, []string{"snap-seccomp", "compile", profile + ".src", profile + ".bin"}) + c.Check(s.snapSeccomp.Calls(), testutil.DeepContains, []string{"snap-seccomp", "compile", profile + ".src", profile + ".bin2"}) s.snapSeccomp.ForgetCalls() s.RemoveSnap(c, snapInfo) @@ -275,7 +274,7 @@ err := s.Backend.Setup(appSet, interfaces.ConfinementOptions{}, s.Repo, s.meas) c.Assert(err, IsNil) profile := filepath.Join(dirs.SnapSeccompDir, "snap.samba.smbd") - data, err := ioutil.ReadFile(profile + ".src") + data, err := os.ReadFile(profile + ".src") c.Assert(err, IsNil) for _, line := range []string{ // NOTE: a few randomly picked lines from the real profile. Comments @@ -652,7 +651,7 @@ c.Check(profile+".src", testutil.FileEquals, s.profileHeader+"\ndefault\n") c.Check(s.snapSeccomp.Calls(), DeepEquals, [][]string{ - {"snap-seccomp", "compile", profile + ".src", profile + ".bin"}, + {"snap-seccomp", "compile", profile + ".src", profile + ".bin2"}, }) // unchanged snap-seccomp version will not trigger a rebuild @@ -678,7 +677,7 @@ c.Check(s.snapSeccomp.Calls(), HasLen, 2) c.Check(s.snapSeccomp.Calls(), DeepEquals, [][]string{ // compilation from first Setup() - {"snap-seccomp", "compile", profile + ".src", profile + ".bin"}, + {"snap-seccomp", "compile", profile + ".src", profile + ".bin2"}, // initialization with new version {"snap-seccomp", "version-info"}, }) @@ -691,11 +690,11 @@ c.Check(s.snapSeccomp.Calls(), HasLen, 3) c.Check(s.snapSeccomp.Calls(), DeepEquals, [][]string{ // compilation from first Setup() - {"snap-seccomp", "compile", profile + ".src", profile + ".bin"}, + {"snap-seccomp", "compile", profile + ".src", profile + ".bin2"}, // initialization with new version {"snap-seccomp", "version-info"}, // compilation of profiles with new compiler version - {"snap-seccomp", "compile", profile + ".src", profile + ".bin"}, + {"snap-seccomp", "compile", profile + ".src", profile + ".bin2"}, }) } @@ -764,7 +763,7 @@ c.Assert(err, IsNil) // NOTE: we don't call seccomp.MockTemplate() profile := filepath.Join(dirs.SnapSeccompDir, "snap.app.cmd") - data, err := ioutil.ReadFile(profile + ".src") + data, err := os.ReadFile(profile + ".src") c.Assert(err, IsNil) for _, line := range []string{ // NOTE: a few randomly picked lines from the real @@ -810,7 +809,7 @@ c.Assert(err, IsNil) // NOTE: we don't call seccomp.MockTemplate() profile := filepath.Join(dirs.SnapSeccompDir, "snap.app.cmd") - data, err := ioutil.ReadFile(profile + ".src") + data, err := os.ReadFile(profile + ".src") c.Assert(err, IsNil) for _, line := range []string{ // and a few randomly picked lines from root syscalls @@ -922,7 +921,7 @@ c.Assert(m.profiles, DeepEquals, profiles) for _, p := range profiles { - c.Check(filepath.Join(dirs.SnapSeccompDir, p+".bin"), testutil.FileEquals, "done "+p+".bin") + c.Check(filepath.Join(dirs.SnapSeccompDir, p+".bin2"), testutil.FileEquals, "done "+p+".bin2") } } @@ -948,10 +947,10 @@ } m := mockedSyncedFailingCompiler{ // pretend compilation of those 2 fails - whichFail: []string{"profile-005.bin", "profile-009.bin"}, + whichFail: []string{"profile-005.bin2", "profile-009.bin2"}, } err = seccomp.ParallelCompile(&m, profiles) - c.Assert(err, ErrorMatches, "cannot compile .*/bpf/profile-00[59]: failed profile-00[59].bin") + c.Assert(err, ErrorMatches, "cannot compile .*/bpf/profile-00[59]: failed profile-00[59].bin2") // make sure all compiled profiles were removed d, err := os.Open(dirs.SnapSeccompDir) @@ 
-965,16 +964,17 @@ func (s *backendSuite) TestParallelCompileRemovesFirst(c *C) { err := os.MkdirAll(dirs.SnapSeccompDir, 0755) c.Assert(err, IsNil) - err = os.WriteFile(filepath.Join(dirs.SnapSeccompDir, "profile-001.bin"), nil, 0755) + err = os.WriteFile(filepath.Join(dirs.SnapSeccompDir, "profile-001.bin2"), nil, 0755) c.Assert(err, IsNil) - // make profiles directory non-accessible err = os.Chmod(dirs.SnapSeccompDir, 0000) c.Assert(err, IsNil) + err = os.Chmod(dirs.SnapSeccompDir, 0500) + c.Assert(err, IsNil) defer os.Chmod(dirs.SnapSeccompDir, 0755) m := mockedSyncedCompiler{} err = seccomp.ParallelCompile(&m, []string{"profile-001"}) - c.Assert(err, ErrorMatches, "remove .*/profile-001.bin: permission denied") + c.Assert(err, ErrorMatches, "remove .*/profile-001.bin2: permission denied") } diff -Nru snapd-2.62+23.10/interfaces/seccomp/template.go snapd-2.63+23.10/interfaces/seccomp/template.go --- snapd-2.62+23.10/interfaces/seccomp/template.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/seccomp/template.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,7 @@ var defaultTemplate = []byte(` # Description: Allows access to app-specific directories and basic runtime # -# The default seccomp policy is default deny with a whitelist of allowed +# The default seccomp policy is default deny with an allowlist of allowed # syscalls. The default policy is intended to be safe for any application to # use and should be evaluated in conjunction with other security backends (eg # AppArmor). For example, a few particularly problematic syscalls that are left @@ -205,14 +205,7 @@ # TODO: this should be scaled back even more ~ioctl - TIOCSTI ~ioctl - TIOCLINUX -# restrict argument otherwise will match all uses of ioctl() and allow the rules -# that were disallowed above -# TODO: Fix the need to keep TIOCLINUX here - the issue is a unrestricted -# allow for "ioctl" here makes libseccomp "optimize" the deny rules -# above away and the generated bpf becomes just "allow ioctl". -# We should fix this by creating a way to make "AND" rules, so -# this becomes "ioctl - !TIOCSTI&&!TIOCLINUX" and remove the "~" again. -ioctl - !TIOCSTI +ioctl io_cancel io_destroy diff -Nru snapd-2.62+23.10/interfaces/system_key.go snapd-2.63+23.10/interfaces/system_key.go --- snapd-2.62+23.10/interfaces/system_key.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/system_key.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "reflect" @@ -70,6 +69,9 @@ // kernel version or similar settings. If those change we may // need to change the generated profiles (e.g. when the user // boots into a more featureful seccomp). + // + // As an exception, the NFSHome is not renamed to RemoteFSHome + // to avoid needless re-computation. 
AppArmorFeatures []string `json:"apparmor-features"` AppArmorParserMtime int64 `json:"apparmor-parser-mtime"` AppArmorParserFeatures []string `json:"apparmor-parser-features"` @@ -84,7 +86,7 @@ const systemKeyVersion = 10 var ( - isHomeUsingNFS = osutil.IsHomeUsingNFS + isHomeUsingRemoteFS = osutil.IsHomeUsingRemoteFS isRootWritableOverlay = osutil.IsRootWritableOverlay mockedSystemKey *systemKey @@ -120,10 +122,10 @@ // Add apparmor-parser-mtime sk.AppArmorParserMtime = apparmor.ParserMtime() - // Add if home is using NFS, if so we need to have a different - // security profile and if this changes we need to change our + // Add if home is using a remote file system, if so we need to have a + // different security profile and if this changes we need to change our // profile. - sk.NFSHome, err = isHomeUsingNFS() + sk.NFSHome, err = isHomeUsingRemoteFS() if err != nil { // just log the error here logger.Noticef("cannot determine nfs usage in generateSystemKey: %v", err) @@ -269,7 +271,7 @@ } func readSystemKey() (*systemKey, error) { - raw, err := ioutil.ReadFile(dirs.SnapSystemKeyFile) + raw, err := os.ReadFile(dirs.SnapSystemKeyFile) if err != nil && os.IsNotExist(err) { return nil, ErrSystemKeyMissing } diff -Nru snapd-2.62+23.10/interfaces/system_key_test.go snapd-2.63+23.10/interfaces/system_key_test.go --- snapd-2.62+23.10/interfaces/system_key_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/system_key_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "bytes" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "reflect" @@ -81,12 +80,12 @@ dirs.SetRootDir("/") } -func (s *systemKeySuite) testInterfaceWriteSystemKey(c *C, nfsHome, overlayRoot bool) { +func (s *systemKeySuite) testInterfaceWriteSystemKey(c *C, remoteFSHome, overlayRoot bool) { var overlay string if overlayRoot { overlay = "overlay" } - restore := interfaces.MockIsHomeUsingNFS(func() (bool, error) { return nfsHome, nil }) + restore := interfaces.MockIsHomeUsingRemoteFS(func() (bool, error) { return remoteFSHome, nil }) defer restore() restore = interfaces.MockReadBuildID(func(p string) (string, error) { @@ -104,7 +103,7 @@ err := interfaces.WriteSystemKey() c.Assert(err, IsNil) - systemKey, err := ioutil.ReadFile(dirs.SnapSystemKeyFile) + systemKey, err := os.ReadFile(dirs.SnapSystemKeyFile) c.Assert(err, IsNil) kernelFeatures, _ := apparmor.KernelFeatures() @@ -136,18 +135,18 @@ apparmorFeaturesStr, apparmorParserMtime, apparmorParserFeaturesStr, - nfsHome, + remoteFSHome, overlay, seccompActionsStr, seccompCompilerVersion, )) } -func (s *systemKeySuite) TestInterfaceWriteSystemKeyNoNFS(c *C) { +func (s *systemKeySuite) TestInterfaceWriteSystemKeyNoRemoteFS(c *C) { s.testInterfaceWriteSystemKey(c, false, false) } -func (s *systemKeySuite) TestInterfaceWriteSystemKeyWithNFS(c *C) { +func (s *systemKeySuite) TestInterfaceWriteSystemKeyWithRemoteFS(c *C) { s.testInterfaceWriteSystemKey(c, true, false) } @@ -156,12 +155,12 @@ } // bonus points to someone who actually runs this -func (s *systemKeySuite) TestInterfaceWriteSystemKeyWithNFSWithOverlayRoot(c *C) { +func (s *systemKeySuite) TestInterfaceWriteSystemKeyWithRemoteFSWithOverlayRoot(c *C) { s.testInterfaceWriteSystemKey(c, true, true) } func (s *systemKeySuite) TestInterfaceWriteSystemKeyErrorOnBuildID(c *C) { - restore := interfaces.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore := interfaces.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() restore = 
interfaces.MockReadBuildID(func(p string) (string, error) { diff -Nru snapd-2.62+23.10/interfaces/udev/backend.go snapd-2.63+23.10/interfaces/udev/backend.go --- snapd-2.62+23.10/interfaces/udev/backend.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/udev/backend.go 2024-04-24 00:00:39.000000000 +0000 @@ -153,15 +153,19 @@ var deviceBuf bytes.Buffer deviceBuf.WriteString("# This file is automatically generated.\n") - deviceBuf.WriteString("# snap is allowed to manage own device cgroup.\n") if udevSpec.ControlsDeviceCgroup() { // The spec states that the snap can manage its own device // cgroup (typically applies to container-like snaps), in which // case leave a flag for snap-confine in at a known location. - + deviceBuf.WriteString("# snap is allowed to manage own device cgroup.\n") deviceBuf.WriteString("self-managed=true\n") } + if (opts.DevMode || opts.Classic) && !opts.JailMode { + // Allow devmode + deviceBuf.WriteString("# snap uses non-strict confinement.\n") + deviceBuf.WriteString("non-strict=true\n") + } // the file serves as a checkpoint that udev backend was set up err = osutil.EnsureFileState(selfManageDeviceCgroupPath, &osutil.MemoryFileState{ diff -Nru snapd-2.62+23.10/interfaces/udev/backend_test.go snapd-2.63+23.10/interfaces/udev/backend_test.go --- snapd-2.62+23.10/interfaces/udev/backend_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/udev/backend_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -332,6 +332,17 @@ stat, err := os.Stat(fname) c.Assert(err, IsNil) c.Check(stat.Mode(), Equals, os.FileMode(0644)) + + cgroupFname := filepath.Join(dirs.SnapCgroupPolicyDir, "snap.samba.device") + if !opts.DevMode && !opts.Classic { + c.Check(cgroupFname, testutil.FileEquals, "# This file is automatically generated.\n") + } else { + c.Check(cgroupFname, testutil.FileEquals, "# This file is automatically generated.\n"+ + "# snap uses non-strict confinement.\n"+ + "non-strict=true\n", + ) + } + s.RemoveSnap(c, snapInfo) } } @@ -348,10 +359,19 @@ fname := filepath.Join(dirs.SnapUdevRulesDir, "70-snap.samba.rules") c.Check(fname, testutil.FileAbsent) cgroupFname := filepath.Join(dirs.SnapCgroupPolicyDir, "snap.samba.device") - c.Check(cgroupFname, testutil.FileEquals, "# This file is automatically generated.\n"+ - "# snap is allowed to manage own device cgroup.\n"+ - "self-managed=true\n", - ) + if !opts.DevMode && !opts.Classic { + c.Check(cgroupFname, testutil.FileEquals, "# This file is automatically generated.\n"+ + "# snap is allowed to manage own device cgroup.\n"+ + "self-managed=true\n", + ) + } else { + c.Check(cgroupFname, testutil.FileEquals, "# This file is automatically generated.\n"+ + "# snap is allowed to manage own device cgroup.\n"+ + "self-managed=true\n"+ + "# snap uses non-strict confinement.\n"+ + "non-strict=true\n", + ) + } c.Check(s.udevadmCmd.Calls(), HasLen, 0) s.RemoveSnap(c, snapInfo) c.Check(cgroupFname, testutil.FileAbsent) @@ -407,9 +427,7 @@ fname := filepath.Join(dirs.SnapUdevRulesDir, "70-snap.samba.rules") snapInfo := s.InstallSnap(c, interfaces.ConfinementOptions{}, "", ifacetest.SambaYamlV1, 0) // device cgroup self manage flag is gone now - c.Check(cgroupFname, testutil.FileEquals, "# This file is automatically generated.\n"+ - "# snap is allowed to manage own device cgroup.\n", - ) + c.Check(cgroupFname, testutil.FileEquals, "# This file is automatically generated.\n") // and we have the rules file c.Check(fname, testutil.FileEquals, "# This file is automatically generated.\nsample\n") // and 
udev was called @@ -634,9 +652,7 @@ fname := filepath.Join(dirs.SnapUdevRulesDir, "70-snap.samba.rules") s.InstallSnap(c, interfaces.ConfinementOptions{}, "", ifacetest.SambaYamlV1, 0) // device cgroup self manage flag is gone now - c.Check(cgroupFname, testutil.FileEquals, "# This file is automatically generated.\n"+ - "# snap is allowed to manage own device cgroup.\n", - ) + c.Check(cgroupFname, testutil.FileEquals, "# This file is automatically generated.\n") // and we have the rules file c.Check(fname, testutil.FileEquals, "# This file is automatically generated.\nsample\n") diff -Nru snapd-2.62+23.10/interfaces/udev/spec.go snapd-2.63+23.10/interfaces/udev/spec.go --- snapd-2.62+23.10/interfaces/udev/spec.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/udev/spec.go 2024-04-24 00:00:39.000000000 +0000 @@ -110,7 +110,7 @@ // SUBSYSTEM=="subsystem" is for subsystems (the top directories in /sys/class). Not for devices. // When loaded, they send an ADD event // snap-device-helper expects devices only, not modules nor subsystems - spec.addEntry(fmt.Sprintf("TAG==\"%s\", SUBSYSTEM!=\"module\", SUBSYSTEM!=\"subsystem\", RUN+=\"%s/snap-device-helper %s\"", + spec.addEntry(fmt.Sprintf("TAG==\"%s\", SUBSYSTEM!=\"module\", SUBSYSTEM!=\"subsystem\", RUN+=\"%s/snap-device-helper $env{ACTION} %s $devpath $major:$minor\"", tag, dirs.DistroLibExecDir, tag), tag) } } diff -Nru snapd-2.62+23.10/interfaces/udev/spec_test.go snapd-2.63+23.10/interfaces/udev/spec_test.go --- snapd-2.62+23.10/interfaces/udev/spec_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/interfaces/udev/spec_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -124,12 +124,12 @@ kernel="voodoo", TAG+="snap_snap1_foo"`, `# iface-2 kernel="hoodoo", TAG+="snap_snap1_foo"`, - fmt.Sprintf(`TAG=="snap_snap1_foo", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%s/snap-device-helper snap_snap1_foo"`, helperDir), + fmt.Sprintf(`TAG=="snap_snap1_foo", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%s/snap-device-helper $env{ACTION} snap_snap1_foo $devpath $major:$minor"`, helperDir), `# iface-1 kernel="voodoo", TAG+="snap_snap1_hook_configure"`, `# iface-2 kernel="hoodoo", TAG+="snap_snap1_hook_configure"`, - fmt.Sprintf(`TAG=="snap_snap1_hook_configure", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%[1]s/snap-device-helper snap_snap1_hook_configure"`, helperDir), + fmt.Sprintf(`TAG=="snap_snap1_hook_configure", SUBSYSTEM!="module", SUBSYSTEM!="subsystem", RUN+="%[1]s/snap-device-helper $env{ACTION} snap_snap1_hook_configure $devpath $major:$minor"`, helperDir), }) } diff -Nru snapd-2.62+23.10/kernel/kernel.go snapd-2.63+23.10/kernel/kernel.go --- snapd-2.62+23.10/kernel/kernel.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/kernel/kernel.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -66,7 +65,7 @@ // in the snap root directory if the file exists. 
func ReadInfo(kernelSnapRootDir string) (*Info, error) { p := filepath.Join(kernelSnapRootDir, "meta", "kernel.yaml") - content, err := ioutil.ReadFile(p) + content, err := os.ReadFile(p) // meta/kernel.yaml is optional so we should not error here if // it is missing if os.IsNotExist(err) { diff -Nru snapd-2.62+23.10/kernel/kernel_drivers.go snapd-2.63+23.10/kernel/kernel_drivers.go --- snapd-2.62+23.10/kernel/kernel_drivers.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/kernel/kernel_drivers.go 2024-04-24 00:00:39.000000000 +0000 @@ -41,21 +41,6 @@ // separated by dots for the kernel version. var utsRelease = regexp.MustCompile(`^([0-9]+\.){2}[0-9]+`) -const earlyKernelsDir = "mnt/kernel-snaps" - -// EarlyKernelModsComponentMountDir returns the path where components are mounted on -// early boot. -func EarlyKernelModsComponentMountDir(compName string, compRev snap.Revision, ksnapName string, snapRev snap.Revision) string { - return filepath.Join(dirs.RunDir, earlyKernelsDir, - ksnapName, "components", snapRev.String(), compName, compRev.String()) -} - -// EarlyKernelMountDir returns the mount directory for early on boot mount of -// kernel snaps. -func EarlyKernelMountDir(ksnapName string, rev snap.Revision) string { - return filepath.Join(dirs.RunDir, "mnt/kernel-snaps", ksnapName, rev.String()) -} - // KernelVersionFromModulesDir returns the kernel version for a mounted kernel // snap (this would be the output if "uname -r" for a running kernel). It // assumes that there is a folder named modules/$(uname -r) inside the snap. @@ -189,10 +174,10 @@ // Symbolic links to components for _, ci := range compInfos { - compMntDir := filepath.Join(EarlyKernelModsComponentMountDir( - ci.Component.ComponentName, ci.Revision, kname, krev)) + compPI := snap.MinimalComponentContainerPlaceInfo(ci.Component.ComponentName, + ci.Revision, kname) lname := filepath.Join(compsRoot, ci.Component.ComponentName) - to := filepath.Join(compMntDir, "modules", kversion) + to := filepath.Join(compPI.MountDir(), "modules", kversion) if err := osSymlink(to, lname); err != nil { return err } @@ -296,10 +281,9 @@ return err } for _, kmi := range kmodsInfos { - compMntDir := filepath.Join(EarlyKernelModsComponentMountDir( - kmi.Component.ComponentName, - kmi.Revision, ksnapName, rev)) - if err := createFirmwareSymlinks(compMntDir, updateFwDir); err != nil { + compPI := snap.MinimalComponentContainerPlaceInfo(kmi.Component.ComponentName, + kmi.Revision, ksnapName) + if err := createFirmwareSymlinks(compPI.MountDir(), updateFwDir); err != nil { return err } } diff -Nru snapd-2.62+23.10/kernel/kernel_drivers_test.go snapd-2.63+23.10/kernel/kernel_drivers_test.go --- snapd-2.62+23.10/kernel/kernel_drivers_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/kernel/kernel_drivers_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -158,7 +158,7 @@ } func testBuildKernelDriversTree(c *C) { - mountDir := filepath.Join(dirs.RunDir, "mnt/pc-kernel") + mountDir := filepath.Join(dirs.SnapMountDir, "pc-kernel/1") kversion := "5.15.0-78-generic" createKernelSnapFiles(c, kversion, mountDir) @@ -211,7 +211,7 @@ buf, restore := logger.MockLogger() defer restore() - mountDir := filepath.Join(dirs.RunDir, "mnt/pc-kernel") + mountDir := filepath.Join(dirs.SnapMountDir, "pc-kernel/11") c.Assert(os.MkdirAll(mountDir, 0755), IsNil) // Build the tree should not fail @@ -414,12 +414,12 @@ mockCmd := testutil.MockCommand(c, "depmod", "") defer mockCmd.Restore() - mountDir := filepath.Join(dirs.RunDir, "mnt/pc-kernel") + 
mountDir := filepath.Join(dirs.SnapMountDir, "pc-kernel/1") kversion := "5.15.0-78-generic" createKernelSnapFiles(c, kversion, mountDir) - compMntDir1 := filepath.Join(dirs.RunDir, "mnt/kernel-snaps/pc-kernel/components/1/comp1/11") - compMntDir2 := filepath.Join(dirs.RunDir, "mnt/kernel-snaps/pc-kernel/components/1/comp2/22") + compMntDir1 := filepath.Join(dirs.SnapMountDir, "pc-kernel/components/mnt/comp1/11") + compMntDir2 := filepath.Join(dirs.SnapMountDir, "pc-kernel/components/mnt/comp2/22") createKernelModulesCompFiles(c, kversion, compMntDir1, "comp1") createKernelModulesCompFiles(c, kversion, compMntDir2, "comp2") kmods := []*snap.ComponentSideInfo{ diff -Nru snapd-2.62+23.10/osutil/bootid.go snapd-2.63+23.10/osutil/bootid.go --- snapd-2.62+23.10/osutil/bootid.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/bootid.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,7 @@ package osutil import ( - "io/ioutil" + "io" "os" "strings" ) @@ -32,7 +32,7 @@ return "", err } defer file.Close() - bytes, err := ioutil.ReadAll(file) + bytes, err := io.ReadAll(file) if err != nil { return "", err } diff -Nru snapd-2.62+23.10/osutil/disks/disks_linux.go snapd-2.63+23.10/osutil/disks/disks_linux.go --- snapd-2.62+23.10/osutil/disks/disks_linux.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/disks/disks_linux.go 2024-04-24 00:00:39.000000000 +0000 @@ -26,7 +26,7 @@ "errors" "fmt" "io" - "io/ioutil" + "os" "os/exec" "path/filepath" "sort" @@ -495,13 +495,13 @@ majmin := props["MAJOR"] + ":" + props["MINOR"] dmDir := filepath.Join(dirs.SysfsDir, "dev", "block", majmin, "dm") - dmUUID, err := ioutil.ReadFile(filepath.Join(dmDir, "uuid")) + dmUUID, err := os.ReadFile(filepath.Join(dmDir, "uuid")) if err != nil { return nil, fmt.Errorf(errFmt, err) } dmUUID = bytes.TrimSpace(dmUUID) - dmName, err := ioutil.ReadFile(filepath.Join(dmDir, "name")) + dmName, err := os.ReadFile(filepath.Join(dmDir, "name")) if err != nil { return nil, fmt.Errorf(errFmt, err) } @@ -687,7 +687,7 @@ // /dev/mmcblk0boot0 disk device on the dragonboard which exists // under the /dev/mmcblk0 disk, but is not a partition and is // instead a proper disk - _, err := ioutil.ReadFile(filepath.Join(path, "partition")) + _, err := os.ReadFile(filepath.Join(path, "partition")) if err != nil { continue } @@ -934,7 +934,7 @@ // get disks for every block device in /sys/block/ blockDir := filepath.Join(dirs.SysfsDir, "block") - files, err := ioutil.ReadDir(blockDir) + files, err := os.ReadDir(blockDir) if err != nil { return nil, err } diff -Nru snapd-2.62+23.10/osutil/export_test.go snapd-2.63+23.10/osutil/export_test.go --- snapd-2.62+23.10/osutil/export_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/export_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "fmt" "io" - "io/ioutil" "os" "os/exec" "os/user" @@ -184,7 +183,7 @@ // MockEtcFstab mocks content of /etc/fstab read by IsHomeUsingNFS func MockEtcFstab(text string) (restore func()) { old := etcFstab - f, err := ioutil.TempFile("", "fstab") + f, err := os.CreateTemp("", "fstab") if err != nil { panic(fmt.Errorf("cannot open temporary file: %s", err)) } diff -Nru snapd-2.62+23.10/osutil/inotify/inotify_linux_test.go snapd-2.63+23.10/osutil/inotify/inotify_linux_test.go --- snapd-2.62+23.10/osutil/inotify/inotify_linux_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/inotify/inotify_linux_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -7,7 +7,6 @@ package inotify_test import ( - 
"io/ioutil" "os" "sync/atomic" "testing" @@ -23,7 +22,7 @@ t.Fatalf("NewWatcher failed: %s", err) } - dir, err := ioutil.TempDir("", "inotify") + dir, err := os.MkdirTemp("", "inotify") if err != nil { t.Fatalf("TempDir failed: %s", err) } @@ -115,7 +114,7 @@ t.Fatalf("NewWatcher failed: %s", err) } - dir, err := ioutil.TempDir("", "inotify") + dir, err := os.MkdirTemp("", "inotify") if err != nil { t.Fatalf("TempDir failed: %s", err) } diff -Nru snapd-2.62+23.10/osutil/io_test.go snapd-2.63+23.10/osutil/io_test.go --- snapd-2.62+23.10/osutil/io_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/io_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "errors" - "io/ioutil" "math/rand" "os" "path/filepath" @@ -50,7 +49,7 @@ c.Check(p, testutil.FileEquals, "canary") // no files left behind! - d, err := ioutil.ReadDir(tmpdir) + d, err := os.ReadDir(tmpdir) c.Assert(err, IsNil) c.Assert(len(d), Equals, 1) } @@ -509,12 +508,12 @@ err = osutil.AtomicRename(src+"/", dst+"/") c.Assert(err, IsNil) - d, err := ioutil.ReadDir(dst) + d, err := os.ReadDir(dst) c.Assert(err, IsNil) c.Assert(len(d), Equals, 1) c.Assert(d[0].Name(), Equals, "file") - data, err := ioutil.ReadFile(filepath.Join(dst, "file")) + data, err := os.ReadFile(filepath.Join(dst, "file")) c.Assert(err, IsNil) c.Assert(data, DeepEquals, contents) diff -Nru snapd-2.62+23.10/osutil/kcmdline/kcmdline.go snapd-2.63+23.10/osutil/kcmdline/kcmdline.go --- snapd-2.62+23.10/osutil/kcmdline/kcmdline.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/kcmdline/kcmdline.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,7 @@ "bytes" "errors" "fmt" - "io/ioutil" + "os" "strings" "github.com/snapcore/snapd/osutil" @@ -349,7 +349,7 @@ // KernelCommandLine returns the command line reported by the running kernel. func KernelCommandLine() (string, error) { - buf, err := ioutil.ReadFile(procCmdline) + buf, err := os.ReadFile(procCmdline) if err != nil { return "", err } diff -Nru snapd-2.62+23.10/osutil/mkfs/mkfs.go snapd-2.63+23.10/osutil/mkfs/mkfs.go --- snapd-2.62+23.10/osutil/mkfs/mkfs.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/mkfs/mkfs.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -185,7 +184,7 @@ // mkfs.vfat does not know how to populate the filesystem with contents, // we need to do the work ourselves - fis, err := ioutil.ReadDir(contentsRootDir) + fis, err := os.ReadDir(contentsRootDir) if err != nil { return fmt.Errorf("cannot list directory contents: %v", err) } diff -Nru snapd-2.62+23.10/osutil/mountinfo_linux.go snapd-2.63+23.10/osutil/mountinfo_linux.go --- snapd-2.62+23.10/osutil/mountinfo_linux.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/mountinfo_linux.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "bytes" "fmt" "io" - "io/ioutil" "os" "sort" "strconv" @@ -97,7 +96,7 @@ } func MockMountInfo(content string) (restore func()) { - return mockMountInfo(func() (io.ReadCloser, error) { return ioutil.NopCloser(bytes.NewBufferString(content)), nil }) + return mockMountInfo(func() (io.ReadCloser, error) { return io.NopCloser(bytes.NewBufferString(content)), nil }) } // LoadMountInfo loads list of mounted entries from /proc/self/mountinfo. 
This diff -Nru snapd-2.62+23.10/osutil/nfs.go snapd-2.63+23.10/osutil/nfs.go --- snapd-2.62+23.10/osutil/nfs.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/nfs.go 2024-04-24 00:00:39.000000000 +0000 @@ -19,17 +19,17 @@ package osutil -func IsHomeUsingNFS() (bool, error) { - return isHomeUsingNFS() +func IsHomeUsingRemoteFS() (bool, error) { + return isHomeUsingRemoteFS() } -// MockIsHomeUsingNFS mocks the real implementation of osutil.IsHomeUsingNFS. +// MockIsHomeUsingRemoteFS mocks the real implementation of osutil.IsHomeUsingRemoteFS. // This is exported so that other packages that indirectly interact with this -// functionality can mock IsHomeUsingNFS. -func MockIsHomeUsingNFS(new func() (bool, error)) (restore func()) { - old := isHomeUsingNFS - isHomeUsingNFS = new +// functionality can mock IsHomeUsingRemoteFS. +func MockIsHomeUsingRemoteFS(new func() (bool, error)) (restore func()) { + old := isHomeUsingRemoteFS + isHomeUsingRemoteFS = new return func() { - isHomeUsingNFS = old + isHomeUsingRemoteFS = old } } diff -Nru snapd-2.62+23.10/osutil/nfs_darwin.go snapd-2.63+23.10/osutil/nfs_darwin.go --- snapd-2.62+23.10/osutil/nfs_darwin.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/nfs_darwin.go 2024-04-24 00:00:39.000000000 +0000 @@ -19,7 +19,7 @@ package osutil -// isHomeUsingNFS is not implemented on darwin -var isHomeUsingNFS = func() (bool, error) { +// isHomeUsingRemoteFS is not implemented on darwin +var isHomeUsingRemoteFS = func() (bool, error) { return false, ErrDarwin } diff -Nru snapd-2.62+23.10/osutil/nfs_linux.go snapd-2.63+23.10/osutil/nfs_linux.go --- snapd-2.62+23.10/osutil/nfs_linux.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/nfs_linux.go 2024-04-24 00:00:39.000000000 +0000 @@ -26,19 +26,22 @@ var etcFstab = "/etc/fstab" -// isHomeUsingNFS returns true if NFS mounts are defined or mounted under /home. +// isHomeUsingRemoteFS informs if remote filesystems are defined or mounted under /home. // // Internally /proc/self/mountinfo and /etc/fstab are interrogated (for current -// and possible mounted filesystems). If either of those describes NFS +// and possible mounted filesystems). If either of those describes NFS // filesystem mounted under or beneath /home/ then the return value is true. 
-var isHomeUsingNFS = func() (bool, error) { +var isHomeUsingRemoteFS = func() (bool, error) { mountinfo, err := LoadMountInfo() if err != nil { return false, fmt.Errorf("cannot parse mountinfo: %s", err) } for _, entry := range mountinfo { - if (entry.FsType == "nfs4" || entry.FsType == "nfs" || entry.FsType == "autofs") && (strings.HasPrefix(entry.MountDir, "/home/") || entry.MountDir == "/home") { - return true, nil + switch entry.FsType { + case "nfs4", "nfs", "autofs", "cifs": + if strings.HasPrefix(entry.MountDir, "/home/") || entry.MountDir == "/home" { + return true, nil + } } } fstab, err := LoadMountProfile(etcFstab) @@ -46,8 +49,11 @@ return false, fmt.Errorf("cannot parse %s: %s", etcFstab, err) } for _, entry := range fstab.Entries { - if (entry.Type == "nfs4" || entry.Type == "nfs") && (strings.HasPrefix(entry.Dir, "/home/") || entry.Dir == "/home") { - return true, nil + switch entry.Type { + case "nfs4", "nfs", "autofs", "cifs": + if strings.HasPrefix(entry.Dir, "/home/") || entry.Dir == "/home" { + return true, nil + } } } return false, nil diff -Nru snapd-2.62+23.10/osutil/nfs_linux_test.go snapd-2.63+23.10/osutil/nfs_linux_test.go --- snapd-2.62+23.10/osutil/nfs_linux_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/nfs_linux_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -29,10 +29,10 @@ var _ = Suite(&nfsSuite{}) -func (s *nfsSuite) TestIsHomeUsingNFS(c *C) { +func (s *nfsSuite) TestIsHomeUsingRemoteFS(c *C) { cases := []struct { mountinfo, fstab string - nfs bool + isRemoteFS bool errorPattern string }{{ // Errors from parsing mountinfo and fstab are propagated. @@ -43,14 +43,14 @@ errorPattern: "cannot parse .*/fstab.*, .*", }, { // NFSv3 {tcp,udp} and NFSv4 currently mounted at /home/zyga/nfs are recognized. 
- mountinfo: "1074 28 0:59 / /home/zyga/nfs rw,relatime shared:342 - nfs localhost:/srv/nfs rw,vers=3,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=127.0.0.1,mountvers=3,mountport=54125,mountproto=tcp,local_lock=none,addr=127.0.0.1", - nfs: true, + mountinfo: "1074 28 0:59 / /home/zyga/nfs rw,relatime shared:342 - nfs localhost:/srv/nfs rw,vers=3,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=127.0.0.1,mountvers=3,mountport=54125,mountproto=tcp,local_lock=none,addr=127.0.0.1", + isRemoteFS: true, }, { - mountinfo: "1074 28 0:59 / /home/zyga/nfs rw,relatime shared:342 - nfs localhost:/srv/nfs rw,vers=3,rsize=32768,wsize=32768,namlen=255,hard,proto=udp,timeo=11,retrans=3,sec=sys,mountaddr=127.0.0.1,mountvers=3,mountport=47875,mountproto=udp,local_lock=none,addr=127.0.0.1", - nfs: true, + mountinfo: "1074 28 0:59 / /home/zyga/nfs rw,relatime shared:342 - nfs localhost:/srv/nfs rw,vers=3,rsize=32768,wsize=32768,namlen=255,hard,proto=udp,timeo=11,retrans=3,sec=sys,mountaddr=127.0.0.1,mountvers=3,mountport=47875,mountproto=udp,local_lock=none,addr=127.0.0.1", + isRemoteFS: true, }, { - mountinfo: "680 27 0:59 / /home/zyga/nfs rw,relatime shared:478 - nfs4 localhost:/srv/nfs rw,vers=4.2,rsize=524288,wsize=524288,namlen=255,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=127.0.0.1,local_lock=none,addr=127.0.0.1", - nfs: true, + mountinfo: "680 27 0:59 / /home/zyga/nfs rw,relatime shared:478 - nfs4 localhost:/srv/nfs rw,vers=4.2,rsize=524288,wsize=524288,namlen=255,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=127.0.0.1,local_lock=none,addr=127.0.0.1", + isRemoteFS: true, }, { // NFSv3 {tcp,udp} and NFSv4 currently mounted at /home/zyga/nfs are ignored (not in $HOME). mountinfo: "1074 28 0:59 / /mnt/nfs rw,relatime shared:342 - nfs localhost:/srv/nfs rw,vers=3,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=127.0.0.1,mountvers=3,mountport=54125,mountproto=tcp,local_lock=none,addr=127.0.0.1", @@ -62,24 +62,34 @@ // NFS that may be mounted at /home and /home/zyga/nfs is recognized. // Two spellings are possible, "nfs" and "nfs4" (they are equivalent // nowadays). - fstab: "localhost:/srv/nfs /home nfs defaults 0 0", - nfs: true, + fstab: "localhost:/srv/nfs /home nfs defaults 0 0", + isRemoteFS: true, }, { - fstab: "localhost:/srv/nfs /home nfs4 defaults 0 0", - nfs: true, + fstab: "localhost:/srv/nfs /home nfs4 defaults 0 0", + isRemoteFS: true, }, { - fstab: "localhost:/srv/nfs /home/zyga/nfs nfs defaults 0 0", - nfs: true, + fstab: "localhost:/srv/nfs /home/zyga/nfs nfs defaults 0 0", + isRemoteFS: true, }, { - fstab: "localhost:/srv/nfs /home/zyga/nfs nfs4 defaults 0 0", - nfs: true, + fstab: "localhost:/srv/nfs /home/zyga/nfs nfs4 defaults 0 0", + isRemoteFS: true, }, { // NFS that may be mounted at /mnt/nfs is ignored (not in $HOME). fstab: "localhost:/srv/nfs /mnt/nfs nfs defaults 0 0", }, { // autofs that is mounted at /home. - mountinfo: "137 29 0:50 / /home rw,relatime shared:87 - autofs /etc/auto.master.d/home rw,fd=7,pgrp=22588,timeout=300,minproto=5,maxproto=5,indirect,pipe_ino=173399", - nfs: true, + mountinfo: "137 29 0:50 / /home rw,relatime shared:87 - autofs /etc/auto.master.d/home rw,fd=7,pgrp=22588,timeout=300,minproto=5,maxproto=5,indirect,pipe_ino=173399", + isRemoteFS: true, + }, { + // cifs that is mounted at /home + // This is not real data, it is made-up. 
+ mountinfo: "0 0 0:0 / /home rw,relatime shared:0 - cifs //sub.example.org/path$/all-users irrelevant-options", + isRemoteFS: true, + }, { + // cifs that is mounted at /home/$USERNAME + // This is not real data, it is made-up. + mountinfo: "0 0 0:0 / /home/some-user rw,relatime shared:0 - cifs //sub.example.org/path$/some-user irrelevant-options", + isRemoteFS: true, }} for _, tc := range cases { restore := osutil.MockMountInfo(tc.mountinfo) @@ -87,12 +97,12 @@ restore = osutil.MockEtcFstab(tc.fstab) defer restore() - nfs, err := osutil.IsHomeUsingNFS() + isRemoteFS, err := osutil.IsHomeUsingRemoteFS() if tc.errorPattern != "" { c.Assert(err, ErrorMatches, tc.errorPattern, Commentf("test case %#v", tc)) } else { c.Assert(err, IsNil) } - c.Assert(nfs, Equals, tc.nfs) + c.Assert(isRemoteFS, Equals, tc.isRemoteFS) } } diff -Nru snapd-2.62+23.10/osutil/strace/timing_test.go snapd-2.63+23.10/osutil/strace/timing_test.go --- snapd-2.62+23.10/osutil/strace/timing_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/strace/timing_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "bytes" - "io/ioutil" "os" . "gopkg.in/check.v1" @@ -118,7 +117,7 @@ `) func (s *timingSuite) TestTraceExecveTimings(c *C) { - f, err := ioutil.TempFile("", "strace-extract-test-") + f, err := os.CreateTemp("", "strace-extract-test-") c.Assert(err, IsNil) defer os.Remove(f.Name()) _, err = f.Write(sampleStraceSimple) diff -Nru snapd-2.62+23.10/osutil/syncdir.go snapd-2.63+23.10/osutil/syncdir.go --- snapd-2.62+23.10/osutil/syncdir.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/syncdir.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "bytes" "fmt" "io" - "io/ioutil" "os" "path/filepath" "sort" @@ -40,7 +39,7 @@ } func (sym SymlinkFileState) State() (io.ReadCloser, int64, os.FileMode, error) { - return ioutil.NopCloser(bytes.NewReader([]byte(sym.Target))), int64(len(sym.Target)), os.ModeSymlink, nil + return io.NopCloser(bytes.NewReader([]byte(sym.Target))), int64(len(sym.Target)), os.ModeSymlink, nil } // FileReference describes the desired content by referencing an existing file. @@ -93,7 +92,7 @@ if !blob.Mode.IsRegular() { return nil, 0, os.FileMode(0), fmt.Errorf("internal error: only regular files are supported, got %q instead", blob.Mode.Type()) } - return ioutil.NopCloser(bytes.NewReader(blob.Content)), int64(len(blob.Content)), blob.Mode, nil + return io.NopCloser(bytes.NewReader(blob.Content)), int64(len(blob.Content)), blob.Mode, nil } // ErrSameState is returned when the state of a file has not changed. 
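A large share of the surrounding hunks in osutil and overlord are a mechanical migration off the io/ioutil package, deprecated since Go 1.16: ioutil.ReadFile becomes os.ReadFile, ioutil.ReadAll becomes io.ReadAll, ioutil.NopCloser becomes io.NopCloser, ioutil.TempFile and ioutil.TempDir become os.CreateTemp and os.MkdirTemp, and ioutil.ReadDir becomes os.ReadDir (which returns fs.DirEntry values rather than fs.FileInfo). A small stand-alone sketch of these replacements, not taken from snapd itself, is:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

func main() {
	// ioutil.TempDir -> os.MkdirTemp (error handling elided for brevity)
	dir, _ := os.MkdirTemp("", "example")
	defer os.RemoveAll(dir)

	// ioutil.TempFile -> os.CreateTemp
	f, _ := os.CreateTemp(dir, "example-*")
	f.WriteString("hello")
	f.Close()

	// ioutil.ReadFile -> os.ReadFile
	data, _ := os.ReadFile(f.Name())

	// ioutil.NopCloser -> io.NopCloser, ioutil.ReadAll -> io.ReadAll
	rc := io.NopCloser(bytes.NewReader(data))
	buf, _ := io.ReadAll(rc)
	rc.Close()

	// ioutil.ReadDir -> os.ReadDir: entries are fs.DirEntry, not fs.FileInfo
	entries, _ := os.ReadDir(dir)
	fmt.Println(string(buf), len(entries))
}
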
@@ -264,7 +263,7 @@ return false, err } defer readerA.Close() - buf, err := ioutil.ReadAll(readerA) + buf, err := io.ReadAll(readerA) if err != nil { return false, err } @@ -302,7 +301,7 @@ if err != nil { return err } - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { return err } diff -Nru snapd-2.62+23.10/osutil/udev/crawler/device.go snapd-2.63+23.10/osutil/udev/crawler/device.go --- snapd-2.62+23.10/osutil/udev/crawler/device.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/udev/crawler/device.go 2024-04-24 00:00:39.000000000 +0000 @@ -4,7 +4,7 @@ "bufio" "bytes" "fmt" - "io/ioutil" + "io" "os" "path/filepath" "strings" @@ -90,7 +90,7 @@ defer f.Close() - data, err := ioutil.ReadAll(f) + data, err := io.ReadAll(f) if err != nil { return nil, err } diff -Nru snapd-2.62+23.10/osutil/udev/main.go.sample snapd-2.63+23.10/osutil/udev/main.go.sample --- snapd-2.62+23.10/osutil/udev/main.go.sample 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/osutil/udev/main.go.sample 2024-04-24 00:00:39.000000000 +0000 @@ -4,7 +4,6 @@ "encoding/json" "flag" "fmt" - "io/ioutil" "log" "os" "os/signal" @@ -127,7 +126,7 @@ return nil, nil } - stream, err := ioutil.ReadFile(*filePath) + stream, err := os.ReadFile(*filePath) if err != nil { return nil, err } diff -Nru snapd-2.62+23.10/overlord/configstate/configcore/cloud.go snapd-2.63+23.10/overlord/configstate/configcore/cloud.go --- snapd-2.62+23.10/overlord/configstate/configcore/cloud.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/configstate/configcore/cloud.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ import ( "encoding/json" "errors" - "io/ioutil" "os" "github.com/snapcore/snapd/dirs" @@ -91,7 +90,7 @@ return nil } - data, err := ioutil.ReadFile(dirs.CloudInstanceDataFile) + data, err := os.ReadFile(dirs.CloudInstanceDataFile) if os.IsNotExist(err) { // nothing to do return nil diff -Nru snapd-2.62+23.10/overlord/configstate/configcore/homedirs_test.go snapd-2.63+23.10/overlord/configstate/configcore/homedirs_test.go --- snapd-2.62+23.10/overlord/configstate/configcore/homedirs_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/configstate/configcore/homedirs_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "errors" - "io/ioutil" "os" "path/filepath" "strings" @@ -239,7 +238,7 @@ // Check that the config file has been written configPath := filepath.Join(dirs.SnapdStateDir(dirs.GlobalRootDir), "system-params") - contents, err := ioutil.ReadFile(configPath) + contents, err := os.ReadFile(configPath) c.Assert(err, IsNil) c.Check(string(contents), Equals, "homedirs=/home/existingDir\n") diff -Nru snapd-2.62+23.10/overlord/configstate/configcore/tmp.go snapd-2.63+23.10/overlord/configstate/configcore/tmp.go --- snapd-2.62+23.10/overlord/configstate/configcore/tmp.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/configstate/configcore/tmp.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -108,7 +107,7 @@ Content: []byte(content), Mode: 0644, } - oldContent, err := ioutil.ReadFile(cfgFilePath) + oldContent, err := os.ReadFile(cfgFilePath) if err == nil && content == string(oldContent) { modify = false } diff -Nru snapd-2.62+23.10/overlord/devicestate/crypto.go snapd-2.63+23.10/overlord/devicestate/crypto.go --- snapd-2.62+23.10/overlord/devicestate/crypto.go 2024-03-21 20:06:09.000000000 +0000 +++ 
snapd-2.63+23.10/overlord/devicestate/crypto.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "crypto/x509" "encoding/pem" "errors" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -34,9 +33,9 @@ func generateRSAKey(keyLength int) (*rsa.PrivateKey, error) { // The temporary directory is created with mode - // 0700 by ioutil.TempDir, see: - // https://github.com/golang/go/blob/master/src/io/ioutil/tempfile.go#L84 - tempDir, err := ioutil.TempDir(os.TempDir(), "snapd") + // 0700 by os.MkdirTemp, see: + // https://github.com/golang/go/blob/3b29222ffdcaea70842ed167632468f54a1783ae/src/os/tempfile.go#L98 + tempDir, err := os.MkdirTemp(os.TempDir(), "snapd") if err != nil { return nil, err } @@ -51,7 +50,7 @@ return nil, osutil.OutputErr(out, err) } - d, err := ioutil.ReadFile(rsaKeyFile) + d, err := os.ReadFile(rsaKeyFile) if err != nil { return nil, err } diff -Nru snapd-2.62+23.10/overlord/devicestate/devicestate_gadget_test.go snapd-2.63+23.10/overlord/devicestate/devicestate_gadget_test.go --- snapd-2.62+23.10/overlord/devicestate/devicestate_gadget_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/devicestate/devicestate_gadget_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -751,7 +751,7 @@ expectedRollbackDir := filepath.Join(dirs.SnapRollbackDir, "foo-gadget_34") updaterForStructureCalls := 0 - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, _ gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, _ gadget.ContentUpdateObserver) (gadget.Updater, error) { updaterForStructureCalls++ c.Assert(loc, Equals, gadget.StructureLocation{ diff -Nru snapd-2.62+23.10/overlord/devicestate/devicestate_install_mode_test.go snapd-2.63+23.10/overlord/devicestate/devicestate_install_mode_test.go --- snapd-2.62+23.10/overlord/devicestate/devicestate_install_mode_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/devicestate/devicestate_install_mode_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "os" "path/filepath" "time" @@ -1111,7 +1111,7 @@ }) c.Assert(err, IsNil) c.Check(filepath.Join(filepath.Join(dirs.GlobalRootDir, "/run/mnt/ubuntu-data/system-data/var/lib/snapd/device/fde"), "ubuntu-save.key"), testutil.FileEquals, []byte(saveKey)) - marker, err := ioutil.ReadFile(filepath.Join(filepath.Join(dirs.GlobalRootDir, "/run/mnt/ubuntu-data/system-data/var/lib/snapd/device/fde"), "marker")) + marker, err := os.ReadFile(filepath.Join(filepath.Join(dirs.GlobalRootDir, "/run/mnt/ubuntu-data/system-data/var/lib/snapd/device/fde"), "marker")) c.Assert(err, IsNil) c.Check(marker, HasLen, 32) c.Check(filepath.Join(boot.InstallHostFDESaveDir, "marker"), testutil.FileEquals, marker) @@ -1514,7 +1514,7 @@ defer f.Close() gz, err := gzip.NewReader(f) c.Assert(err, IsNil) - content, err := ioutil.ReadAll(gz) + content, err := io.ReadAll(gz) c.Assert(err, IsNil) c.Check(string(content), Equals, `---- Output of: snap changes mock output of: snap changes diff -Nru snapd-2.62+23.10/overlord/devicestate/devicestate_remodel_test.go snapd-2.63+23.10/overlord/devicestate/devicestate_remodel_test.go --- snapd-2.62+23.10/overlord/devicestate/devicestate_remodel_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/devicestate/devicestate_remodel_test.go 
2024-04-24 00:00:39.000000000 +0000 @@ -2929,8 +2929,8 @@ c.Assert(tPrepareKernel.Kind(), Equals, "prepare-snap") c.Assert(tPrepareKernel.Summary(), Equals, `Prepare snap "pc-kernel-new" (222) for remodel`) c.Assert(tPrepareKernel.WaitTasks(), HasLen, 0) - c.Assert(tSetupKernelSnap.Kind(), Equals, "setup-kernel-snap") - c.Assert(tSetupKernelSnap.Summary(), Equals, `Setup kernel driver tree for "pc-kernel-new" (222) for remodel`) + c.Assert(tSetupKernelSnap.Kind(), Equals, "prepare-kernel-snap") + c.Assert(tSetupKernelSnap.Summary(), Equals, `Prepare kernel driver tree for "pc-kernel-new" (222) for remodel`) c.Assert(tLinkKernel.Kind(), Equals, "link-snap") c.Assert(tLinkKernel.Summary(), Equals, `Make snap "pc-kernel-new" (222) available to the system during remodel`) c.Assert(tUpdateAssetsKernel.Kind(), Equals, "update-gadget-assets") @@ -3186,8 +3186,8 @@ c.Assert(tSwitchKernel.Kind(), Equals, "switch-snap") c.Assert(tSwitchKernel.Summary(), Equals, `Switch snap "pc-kernel-new" from channel "20/stable" to "20/edge"`) c.Assert(tSwitchKernel.WaitTasks(), HasLen, 0) - c.Assert(tSetupKernelSnap.Kind(), Equals, "setup-kernel-snap") - c.Assert(tSetupKernelSnap.Summary(), Equals, `Setup kernel driver tree for "pc-kernel-new" (222) for remodel`) + c.Assert(tSetupKernelSnap.Kind(), Equals, "prepare-kernel-snap") + c.Assert(tSetupKernelSnap.Summary(), Equals, `Prepare kernel driver tree for "pc-kernel-new" (222) for remodel`) c.Assert(tLinkKernel.Kind(), Equals, "link-snap") c.Assert(tLinkKernel.Summary(), Equals, `Make snap "pc-kernel-new" (222) available to the system during remodel`) c.Assert(tUpdateAssetsKernel.Kind(), Equals, "update-gadget-assets") @@ -3446,8 +3446,8 @@ c.Assert(tPrepareKernel.Kind(), Equals, "prepare-snap") c.Assert(tPrepareKernel.Summary(), Equals, `Prepare snap "pc-kernel-new" (222) for remodel`) c.Assert(tPrepareKernel.WaitTasks(), HasLen, 0) - c.Assert(tSetupKernelSnap.Kind(), Equals, "setup-kernel-snap") - c.Assert(tSetupKernelSnap.Summary(), Equals, `Setup kernel driver tree for "pc-kernel-new" (222) for remodel`) + c.Assert(tSetupKernelSnap.Kind(), Equals, "prepare-kernel-snap") + c.Assert(tSetupKernelSnap.Summary(), Equals, `Prepare kernel driver tree for "pc-kernel-new" (222) for remodel`) c.Assert(tLinkKernel.Kind(), Equals, "link-snap") c.Assert(tLinkKernel.Summary(), Equals, `Make snap "pc-kernel-new" (222) available to the system during remodel`) c.Assert(tUpdateAssetsKernel.Kind(), Equals, "update-gadget-assets") @@ -3801,8 +3801,8 @@ c.Assert(tSwitchChannelKernel.Kind(), Equals, "switch-snap-channel") c.Assert(tSwitchChannelKernel.Summary(), Equals, `Switch pc-kernel-new channel to 20/stable`) c.Assert(tSwitchChannelKernel.WaitTasks(), HasLen, 0) - c.Assert(tSetupKernelSnap.Kind(), Equals, "setup-kernel-snap") - c.Assert(tSetupKernelSnap.Summary(), Equals, `Setup kernel driver tree for "pc-kernel-new" (222) for remodel`) + c.Assert(tSetupKernelSnap.Kind(), Equals, "prepare-kernel-snap") + c.Assert(tSetupKernelSnap.Summary(), Equals, `Prepare kernel driver tree for "pc-kernel-new" (222) for remodel`) c.Assert(tUpdateAssetsFromKernel.Kind(), Equals, "update-gadget-assets") c.Assert(tUpdateAssetsFromKernel.Summary(), Equals, `Update assets from kernel "pc-kernel-new" (222) for remodel`) c.Assert(tLinkKernel.Kind(), Equals, "link-snap") diff -Nru snapd-2.62+23.10/overlord/devicestate/handlers.go snapd-2.63+23.10/overlord/devicestate/handlers.go --- snapd-2.62+23.10/overlord/devicestate/handlers.go 2024-03-21 20:06:09.000000000 +0000 +++ 
snapd-2.63+23.10/overlord/devicestate/handlers.go 2024-04-24 00:00:39.000000000 +0000 @@ -27,13 +27,10 @@ "gopkg.in/tomb.v2" "github.com/snapcore/snapd/interfaces" - "github.com/snapcore/snapd/kernel" "github.com/snapcore/snapd/logger" - "github.com/snapcore/snapd/osutil" "github.com/snapcore/snapd/overlord/restart" "github.com/snapcore/snapd/overlord/snapstate" "github.com/snapcore/snapd/overlord/state" - "github.com/snapcore/snapd/snap" ) func (m *DeviceManager) doMarkPreseeded(t *state.Task, _ *tomb.Tomb) error { @@ -75,23 +72,6 @@ if _, err := exec.Command("umount", "-d", "-l", info.MountDir()).CombinedOutput(); err != nil { return err } - // Remove early mount for the kernel snap - if tp, _ := snapSt.Type(); tp == snap.TypeKernel { - earlyMntPt := kernel.EarlyKernelMountDir(info.RealName, info.Revision) - mounted, err := osutil.IsMounted(earlyMntPt) - if err != nil { - return fmt.Errorf("cannot check early kernel mount point: %w", err) - } - if mounted { - logger.Debugf("unmount early kernel mount at %s", earlyMntPt) - if _, err := exec.Command("umount", "-d", "-l", - earlyMntPt).CombinedOutput(); err != nil { - return err - } - } else { - logger.Debugf("early kernel not mounted") - } - } } st.Set("preseeded", preseeded) diff -Nru snapd-2.62+23.10/overlord/devicestate/handlers_install.go snapd-2.63+23.10/overlord/devicestate/handlers_install.go --- snapd-2.62+23.10/overlord/devicestate/handlers_install.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/devicestate/handlers_install.go 2024-04-24 00:00:39.000000000 +0000 @@ -27,7 +27,6 @@ "encoding/json" "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -818,7 +817,7 @@ } func rotateEncryptionKeys() error { - kd, err := ioutil.ReadFile(filepath.Join(dirs.SnapFDEDir, "ubuntu-save.key")) + kd, err := os.ReadFile(filepath.Join(dirs.SnapFDEDir, "ubuntu-save.key")) if err != nil { return fmt.Errorf("cannot open encryption key file: %v", err) } diff -Nru snapd-2.62+23.10/overlord/devicestate/handlers_systems.go snapd-2.63+23.10/overlord/devicestate/handlers_systems.go --- snapd-2.62+23.10/overlord/devicestate/handlers_systems.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/devicestate/handlers_systems.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "bytes" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -84,7 +83,7 @@ if !strings.HasPrefix(filepath.Dir(fileName), boot.InitramfsUbuntuSeedDir+"/") { return fmt.Errorf("internal error: unexpected recovery system snap location %q", fileName) } - currentLog, err := ioutil.ReadFile(logfile) + currentLog, err := os.ReadFile(logfile) if err != nil && !os.IsNotExist(err) { return err } diff -Nru snapd-2.62+23.10/overlord/hookstate/ctlcmd/ctlcmd.go snapd-2.63+23.10/overlord/hookstate/ctlcmd/ctlcmd.go --- snapd-2.62+23.10/overlord/hookstate/ctlcmd/ctlcmd.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/hookstate/ctlcmd/ctlcmd.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,6 +24,7 @@ "bytes" "fmt" "io" + "strconv" "github.com/jessevdk/go-flags" @@ -45,12 +46,17 @@ stderr io.Writer c *hookstate.Context name string + uid string } func (c *baseCommand) setName(name string) { c.name = name } +func (c *baseCommand) setUid(uid uint32) { + c.uid = strconv.FormatUint(uint64(uid), 10) +} + func (c *baseCommand) setStdout(w io.Writer) { c.stdout = w } @@ -88,6 +94,7 @@ type command interface { setName(name string) + setUid(uid uint32) setStdout(w io.Writer) setStderr(w io.Writer) @@ -157,6 +164,7 @@ for name, cmdInfo := 
range commands { cmd := cmdInfo.generator() cmd.setName(name) + cmd.setUid(uid) cmd.setStdout(&stdoutBuffer) cmd.setStderr(&stderrBuffer) cmd.setContext(context) diff -Nru snapd-2.62+23.10/overlord/hookstate/ctlcmd/export_test.go snapd-2.63+23.10/overlord/hookstate/ctlcmd/export_test.go --- snapd-2.62+23.10/overlord/hookstate/ctlcmd/export_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/hookstate/ctlcmd/export_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,10 +20,12 @@ package ctlcmd import ( + "context" "fmt" "os/user" "github.com/snapcore/snapd/asserts" + "github.com/snapcore/snapd/client/clientutil" "github.com/snapcore/snapd/overlord/devicestate" "github.com/snapcore/snapd/overlord/hookstate" "github.com/snapcore/snapd/overlord/servicestate" @@ -156,3 +158,9 @@ autoRefreshForGatingSnap = old } } + +func MockNewStatusDecorator(f func(ctx context.Context, isGlobal bool, uid string) clientutil.StatusDecorator) (restore func()) { + restore = testutil.Backup(&newStatusDecorator) + newStatusDecorator = f + return restore +} diff -Nru snapd-2.62+23.10/overlord/hookstate/ctlcmd/services.go snapd-2.63+23.10/overlord/hookstate/ctlcmd/services.go --- snapd-2.62+23.10/overlord/hookstate/ctlcmd/services.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/hookstate/ctlcmd/services.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,6 +20,7 @@ package ctlcmd import ( + "context" "fmt" "sort" "text/tabwriter" @@ -47,6 +48,8 @@ Positional struct { ServiceNames []string `positional-arg-name:""` } `positional-args:"yes"` + Global bool `long:"global" short:"g" description:"Show the global enable status for user services instead of the status for the current user"` + User bool `long:"user" short:"u" description:"Show the current status of the user services instead of the global enable status"` } type byApp []*snap.AppInfo @@ -57,21 +60,52 @@ return a[i].Name < a[j].Name } +var newStatusDecorator = func(ctx context.Context, isGlobal bool, uid string) clientutil.StatusDecorator { + if isGlobal { + return servicestate.NewStatusDecorator(progress.Null) + } else { + return servicestate.NewStatusDecoratorForUid(progress.Null, ctx, uid) + } +} + +func (c *servicesCommand) showGlobalEnablement() bool { + if c.uid == "0" && !c.User { + return true + } else if c.uid != "0" && c.Global { + return true + } + return false +} + +func (c *servicesCommand) validateArguments() error { + // can't use --global and --user together + if c.Global && c.User { + return fmt.Errorf(i18n.G("cannot combine --global and --user switches.")) + } + return nil +} + +// The 'snapctl services' command is one of the few commands that can run as +// non-root through snapctl. 
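// Illustrative sketch of the scope selection that showGlobalEnablement above
// encodes, assuming the uid string that ctlcmd records via setUid: a root
// caller gets the global enable state unless --user is passed, while a
// non-root caller gets its own user services unless --global is passed.
// For example, effectiveGlobal("0", false, false) is true and
// effectiveGlobal("1000", false, false) is false.
func effectiveGlobal(uid string, globalFlag, userFlag bool) bool {
	if uid == "0" {
		return !userFlag // root: global enablement unless --user
	}
	return globalFlag // non-root: per-user view unless --global
}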
func (c *servicesCommand) Execute([]string) error { - context, err := c.ensureContext() + ctx, err := c.ensureContext() if err != nil { return err } - st := context.State() - svcInfos, err := getServiceInfos(st, context.InstanceName(), c.Positional.ServiceNames) + if err := c.validateArguments(); err != nil { + return err + } + + st := ctx.State() + svcInfos, err := getServiceInfos(st, ctx.InstanceName(), c.Positional.ServiceNames) if err != nil { return err } sort.Sort(byApp(svcInfos)) - sd := servicestate.NewStatusDecorator(progress.Null) - + isGlobal := c.showGlobalEnablement() + sd := newStatusDecorator(context.TODO(), isGlobal, c.uid) services, err := clientutil.ClientAppInfosFromSnapAppInfos(svcInfos, sd) if err != nil || len(services) == 0 { return err @@ -81,19 +115,8 @@ defer w.Flush() fmt.Fprintln(w, i18n.G("Service\tStartup\tCurrent\tNotes")) - for _, svc := range services { - startup := i18n.G("disabled") - if svc.Enabled { - startup = i18n.G("enabled") - } - current := i18n.G("inactive") - if svc.DaemonScope == snap.UserDaemon { - current = "-" - } else if svc.Active { - current = i18n.G("active") - } - fmt.Fprintf(w, "%s.%s\t%s\t%s\t%s\n", svc.Snap, svc.Name, startup, current, clientutil.ClientAppInfoNotes(&svc)) + fmt.Fprintln(w, clientutil.FmtServiceStatus(&svc, isGlobal)) } return nil diff -Nru snapd-2.62+23.10/overlord/hookstate/ctlcmd/services_test.go snapd-2.63+23.10/overlord/hookstate/ctlcmd/services_test.go --- snapd-2.62+23.10/overlord/hookstate/ctlcmd/services_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/hookstate/ctlcmd/services_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -30,6 +30,7 @@ "github.com/snapcore/snapd/asserts" "github.com/snapcore/snapd/asserts/snapasserts" "github.com/snapcore/snapd/client" + "github.com/snapcore/snapd/client/clientutil" "github.com/snapcore/snapd/dirs" "github.com/snapcore/snapd/interfaces" "github.com/snapcore/snapd/overlord/auth" @@ -98,12 +99,19 @@ return snaps, nil, nil } +type appsSuiteDecoratorResult struct { + daemonType string + active bool + enabled bool +} + type servicectlSuite struct { testutil.BaseTest - st *state.State - fakeStore fakeStore - mockContext *hookstate.Context - mockHandler *hooktest.MockHandler + st *state.State + fakeStore fakeStore + mockContext *hookstate.Context + mockHandler *hooktest.MockHandler + decoratorResults map[string]appsSuiteDecoratorResult } var _ = Suite(&servicectlSuite{}) @@ -746,6 +754,96 @@ c.Check(string(stderr), Equals, "") } +func (s *servicectlSuite) TestServicesAsUserWithGlobal(c *C) { + restore := systemd.MockSystemctl(func(args ...string) (buf []byte, err error) { + c.Assert(args[0], Equals, "show") + c.Check(args[2], Equals, "snap.test-snap.test-service.service") + return []byte(`Id=snap.test-snap.test-service.service +Names=snap.test-snap.test-service.service +Type=simple +ActiveState=active +UnitFileState=enabled +NeedDaemonReload=no +`), nil + }) + defer restore() + + stdout, stderr, err := ctlcmd.Run(s.mockContext, []string{"services", "--global", "test-snap.test-service"}, 1337) + c.Assert(err, IsNil) + c.Check(string(stdout), Equals, ` +Service Startup Current Notes +test-snap.test-service enabled active - +`[1:]) + c.Check(string(stderr), Equals, "") +} + +func (s *servicectlSuite) DecorateWithStatus(appInfo *client.AppInfo, snapApp *snap.AppInfo) error { + name := snapApp.Snap.RealName + "." 
+ appInfo.Name + dec, ok := s.decoratorResults[name] + if !ok { + return fmt.Errorf("%s not found in expected test decorator results", name) + } + appInfo.Daemon = dec.daemonType + appInfo.Enabled = dec.enabled + appInfo.Active = dec.active + return nil +} + +func (s *servicectlSuite) TestServicesUserSwitch(c *C) { + restore := ctlcmd.MockNewStatusDecorator(func(ctx context.Context, isGlobal bool, uid string) clientutil.StatusDecorator { + c.Check(isGlobal, Equals, false) + c.Check(uid, Equals, "0") + return s + }) + defer restore() + + s.decoratorResults = map[string]appsSuiteDecoratorResult{ + "test-snap.user-service": { + daemonType: "simple", + active: true, + enabled: true, + }, + } + + stdout, stderr, err := ctlcmd.Run(s.mockContext, []string{"services", "--user", "test-snap.user-service"}, 0) + c.Assert(err, IsNil) + c.Check(string(stdout), Equals, ` +Service Startup Current Notes +test-snap.user-service enabled active user +`[1:]) + c.Check(string(stderr), Equals, "") +} + +func (s *servicectlSuite) TestServicesAsUser(c *C) { + restore := ctlcmd.MockNewStatusDecorator(func(ctx context.Context, isGlobal bool, uid string) clientutil.StatusDecorator { + c.Check(isGlobal, Equals, false) + c.Check(uid, Equals, "1337") + return s + }) + defer restore() + + s.decoratorResults = map[string]appsSuiteDecoratorResult{ + "test-snap.user-service": { + daemonType: "simple", + active: true, + enabled: true, + }, + } + + stdout, stderr, err := ctlcmd.Run(s.mockContext, []string{"services", "test-snap.user-service"}, 1337) + c.Assert(err, IsNil) + c.Check(string(stdout), Equals, ` +Service Startup Current Notes +test-snap.user-service enabled active user +`[1:]) + c.Check(string(stderr), Equals, "") +} + +func (s *servicectlSuite) TestAppStatusInvalidUserGlobalSwitches(c *C) { + _, _, err := ctlcmd.Run(s.mockContext, []string{"services", "--global", "--user"}, 0) + c.Assert(err, ErrorMatches, "cannot combine --global and --user switches.") +} + func (s *servicectlSuite) TestServicesWithoutContext(c *C) { actions := []string{ "start", diff -Nru snapd-2.62+23.10/overlord/install/install_test.go snapd-2.63+23.10/overlord/install/install_test.go --- snapd-2.62+23.10/overlord/install/install_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/install/install_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "bytes" "crypto" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -898,13 +897,13 @@ c.Assert(err, IsNil) c.Check(filepath.Join(filepath.Join(dirs.GlobalRootDir, "/run/mnt/ubuntu-data/system-data/var/lib/snapd/device/fde"), "ubuntu-save.key"), testutil.FileEquals, []byte(saveKey)) - marker, err := ioutil.ReadFile(filepath.Join(filepath.Join(dirs.GlobalRootDir, "/run/mnt/ubuntu-data/system-data/var/lib/snapd/device/fde"), "marker")) + marker, err := os.ReadFile(filepath.Join(filepath.Join(dirs.GlobalRootDir, "/run/mnt/ubuntu-data/system-data/var/lib/snapd/device/fde"), "marker")) c.Assert(err, IsNil) c.Check(marker, HasLen, 32) c.Check(filepath.Join(boot.InstallHostFDESaveDir, "marker"), testutil.FileEquals, marker) // the assets cache was written to - l, err := ioutil.ReadDir(filepath.Join(dirs.SnapBootAssetsDir, "trusted")) + l, err := os.ReadDir(filepath.Join(dirs.SnapBootAssetsDir, "trusted")) c.Assert(err, IsNil) c.Assert(l, HasLen, 1) } diff -Nru snapd-2.62+23.10/overlord/managers_test.go snapd-2.63+23.10/overlord/managers_test.go --- snapd-2.62+23.10/overlord/managers_test.go 2024-03-21 20:06:09.000000000 +0000 +++ 
snapd-2.63+23.10/overlord/managers_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -28,7 +28,6 @@ "errors" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -1098,7 +1097,7 @@ return case "auth:sessions": // quick validity check - reqBody, err := ioutil.ReadAll(r.Body) + reqBody, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(bytes.Contains(reqBody, []byte("nonce: NONCE")), Equals, true) c.Check(bytes.Contains(reqBody, []byte(fmt.Sprintf("serial: %s", s.expectedSerial))), Equals, true) @@ -4935,7 +4934,7 @@ what = "kernel" } if flags&isKernel != 0 && flags&needsKernelSetup != 0 { - c.Assert(tasks[i].Summary(), Equals, fmt.Sprintf(`Setup kernel driver tree for "%s" (%s)`, name, revno)) + c.Assert(tasks[i].Summary(), Equals, fmt.Sprintf(`Prepare kernel driver tree for "%s" (%s)`, name, revno)) i++ } c.Assert(tasks[i].Summary(), Equals, fmt.Sprintf(`Update assets from %s "%s" (%s)`, what, name, revno)) @@ -4952,7 +4951,7 @@ c.Assert(tasks[i].Summary(), Equals, fmt.Sprintf(`Make snap "%s" (%s) available to the system`, name, revno)) i++ if flags&isKernel != 0 && flags&needsKernelSetup != 0 { - c.Assert(tasks[i].Summary(), Equals, fmt.Sprintf(`Cleanup kernel driver tree for "%s" (%s)`, name, revno)) + c.Assert(tasks[i].Summary(), Equals, fmt.Sprintf(`Discard kernel driver tree for "%s" (%s)`, name, revno)) i++ } c.Assert(tasks[i].Summary(), Equals, fmt.Sprintf(`Automatically connect eligible plugs and slots of snap "%s"`, name)) @@ -4996,7 +4995,7 @@ what = "kernel" } if flags&isKernel != 0 && flags&needsKernelSetup != 0 { - c.Assert(tasks[i].Summary(), Equals, fmt.Sprintf(`Setup kernel driver tree for "%s" (%s)`, name, revno)) + c.Assert(tasks[i].Summary(), Equals, fmt.Sprintf(`Prepare kernel driver tree for "%s" (%s)`, name, revno)) i++ } c.Assert(tasks[i].Summary(), Equals, fmt.Sprintf(`Update assets from %s %q (%s)`, what, name, revno)) @@ -5015,7 +5014,7 @@ c.Assert(tasks[i].Summary(), Equals, fmt.Sprintf(`Automatically connect eligible plugs and slots of snap "%s"`, name)) i++ if flags&isKernel != 0 && flags&needsKernelSetup != 0 { - c.Assert(tasks[i].Summary(), Equals, fmt.Sprintf(`Cleanup kernel driver tree for "%s" (%s)`, name, revno)) + c.Assert(tasks[i].Summary(), Equals, fmt.Sprintf(`Discard kernel driver tree for "%s" (%s)`, name, revno)) i++ } c.Assert(tasks[i].Summary(), Equals, fmt.Sprintf(`Set automatic aliases for snap "%s"`, name)) @@ -6683,7 +6682,7 @@ defer r() updaterForStructureCalls := 0 - restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { updaterForStructureCalls++ c.Assert(ps.Name(), Equals, "foo") return &mockUpdater{}, nil @@ -8080,7 +8079,7 @@ }) defer r() - restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore := gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { // use a mock updater which does nothing return &mockUpdater{ onUpdate: gadget.ErrNoUpdate, @@ -8373,7 +8372,7 @@ defer r() updater 
:= &mockUpdater{} - restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { // use a mock updater pretends an update was applied return updater, nil }) @@ -8561,7 +8560,7 @@ defer r() updater := &mockUpdater{} - restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { // use a mock updater pretends an update was applied return updater, nil }) @@ -9038,7 +9037,7 @@ // remodel updates a gadget, setup a mock updater that pretends an // update was applied updater := &mockUpdater{} - restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { // use a mock updater pretends an update was applied return updater, nil }) @@ -9498,7 +9497,7 @@ // remodel updates a gadget, setup a mock updater that pretends an // update was applied updater := &mockUpdater{} - restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { // use a mock updater pretends an update was applied return updater, nil }) @@ -9806,7 +9805,7 @@ // remodel updates a gadget, setup a mock updater that pretends an // update was applied updater := &mockUpdater{} - restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { + restore = gadget.MockUpdaterForStructure(func(loc gadget.StructureLocation, fromPs, ps *gadget.LaidOutStructure, rootDir, rollbackDir string, observer gadget.ContentUpdateObserver) (gadget.Updater, error) { // use a mock updater pretends an update was applied return updater, nil }) diff -Nru snapd-2.62+23.10/overlord/overlord_test.go snapd-2.63+23.10/overlord/overlord_test.go --- snapd-2.62+23.10/overlord/overlord_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/overlord_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "errors" "fmt" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -1325,7 +1324,7 @@ } func (ovs *overlordSuite) TestLockWithTimeoutHappy(c *C) { - f, err := ioutil.TempFile("", "testlock-*") + f, err := os.CreateTemp("", "testlock-*") defer func() { f.Close() os.Remove(f.Name()) @@ -1354,7 +1353,7 @@ }) defer restoreNotify() - f, err := 
ioutil.TempFile("", "testlock-*") + f, err := os.CreateTemp("", "testlock-*") defer func() { f.Close() os.Remove(f.Name()) diff -Nru snapd-2.62+23.10/overlord/patch/patch1.go snapd-2.63+23.10/overlord/patch/patch1.go --- snapd-2.62+23.10/overlord/patch/patch1.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/patch/patch1.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "errors" - "io/ioutil" + "os" "path/filepath" "github.com/snapcore/snapd/logger" @@ -48,7 +48,7 @@ var patch1ReadType = func(name string, rev snap.Revision) (snap.Type, error) { snapYamlFn := filepath.Join(snap.MountDir(name, rev), "meta", "snap.yaml") - meta, err := ioutil.ReadFile(snapYamlFn) + meta, err := os.ReadFile(snapYamlFn) if err != nil { return snap.TypeApp, err } diff -Nru snapd-2.62+23.10/overlord/servicestate/servicestate.go snapd-2.63+23.10/overlord/servicestate/servicestate.go --- snapd-2.62+23.10/overlord/servicestate/servicestate.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/servicestate/servicestate.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,16 +20,19 @@ package servicestate import ( + "context" "errors" "fmt" "io" "os/user" "path/filepath" "sort" + "strconv" "strings" "time" "github.com/snapcore/snapd/client" + "github.com/snapcore/snapd/client/clientutil" "github.com/snapcore/snapd/overlord/cmdstate" "github.com/snapcore/snapd/overlord/configstate/config" "github.com/snapcore/snapd/overlord/hookstate" @@ -40,6 +43,7 @@ "github.com/snapcore/snapd/snap/quota" "github.com/snapcore/snapd/strutil" "github.com/snapcore/snapd/systemd" + usc "github.com/snapcore/snapd/usersession/client" "github.com/snapcore/snapd/wrappers" ) @@ -350,18 +354,38 @@ type StatusDecorator struct { sysd systemd.Systemd globalUserSysd systemd.Systemd + context context.Context + uid string } // NewStatusDecorator returns a new StatusDecorator. +// +// Using NewStatusDecorator will only allow for global status of user-services +// as StatusDecorator is designed to contain a single set of results for a single +// user. func NewStatusDecorator(rep interface { Notify(string) -}) *StatusDecorator { +}) clientutil.StatusDecorator { return &StatusDecorator{ sysd: systemd.New(systemd.SystemMode, rep), globalUserSysd: systemd.New(systemd.GlobalUserMode, rep), } } +// NewStatusDecoratorForUid returns a new StatusDecorator, but configured +// for a specific uid. This allows the StatusDecorator to get statuses for +// user-services for a specific user. +func NewStatusDecoratorForUid(rep interface { + Notify(string) +}, context context.Context, uid string) clientutil.StatusDecorator { + return &StatusDecorator{ + sysd: systemd.New(systemd.SystemMode, rep), + globalUserSysd: systemd.New(systemd.GlobalUserMode, rep), + context: context, + uid: uid, + } +} + func (sd *StatusDecorator) hasEnabledActivator(appInfo *client.AppInfo) bool { // Just one activator should be enabled in order for the service to be able // to become enabled. For slot activated services this is always true as we @@ -374,6 +398,69 @@ return false } +// queryUserServiceStatus returns a list of service-statuses for the configured users. 
+func (sd *StatusDecorator) queryUserServiceStatus(units []string) ([]*systemd.UnitStatus, error) { + // Avoid any expensive call if there are no user daemons + if len(units) == 0 { + return nil, nil + } + + uid, err := strconv.Atoi(sd.uid) + if err != nil { + return nil, err + } + + cli := usc.NewForUids(uid) + sts, failures, err := cli.ServiceStatus(sd.context, units) + if err != nil { + return nil, err + } + + // Return the first service failure, if any failures were reported + if len(failures[uid]) > 0 { + return nil, fmt.Errorf("cannot retrieve service %q status: %v", + failures[uid][0].Service, failures[uid][0].Error) + } + + // Convert the received unit statuses to systemd-unit statuses + var sysdStatuses []*systemd.UnitStatus + for _, sts := range sts[uid] { + sysdStatuses = append(sysdStatuses, sts.SystemdUnitStatus()) + } + return sysdStatuses, nil +} + +func (sd *StatusDecorator) queryServiceStatus(scope snap.DaemonScope, units []string) ([]*systemd.UnitStatus, error) { + var sts []*systemd.UnitStatus + var err error + switch scope { + case snap.SystemDaemon: + // sysd.Status() makes sure that we get only the units we asked + // for and raises an error otherwise. + sts, err = sd.sysd.Status(units) + case snap.UserDaemon: + // Support the previous behavior of retrieving the global enablement + // status of user services if no uid is configured for this status + // decorator. + if sd.uid != "" { + sts, err = sd.queryUserServiceStatus(units) + } else { + sts, err = sd.globalUserSysd.Status(units) + } + default: + return nil, fmt.Errorf("internal error: unknown daemon-scope %q", scope) + } + if err != nil { + return nil, err + } + + // ensure we get the correct unit count, otherwise report an error. + if len(sts) != len(units) { + return nil, fmt.Errorf("expected %d results, got %d", len(units), len(sts)) + } + return sts, nil +} + // DecorateWithStatus adds service status information to the given // client.AppInfo associated with the given snap.AppInfo. // If the snap is inactive or the app is not service it does nothing. 
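// A minimal usage sketch of the per-user decorator added above; the nil
// reporter and the exampleDecorate name are illustrative only (the unit test
// later in this patch constructs the decorator the same way), and the client,
// snap and servicestate imports from this tree are assumed.
func exampleDecorate(ctx context.Context, uid string, app *client.AppInfo, snapApp *snap.AppInfo) error {
	sd := servicestate.NewStatusDecoratorForUid(nil, ctx, uid)
	// UserDaemon apps are resolved through the session agent of this uid;
	// SystemDaemon apps still go through the system systemd instance.
	return sd.DecorateWithStatus(app, snapApp)
}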
@@ -385,15 +472,6 @@ // nothing to do return nil } - var sysd systemd.Systemd - switch snapApp.DaemonScope { - case snap.SystemDaemon: - sysd = sd.sysd - case snap.UserDaemon: - sysd = sd.globalUserSysd - default: - return fmt.Errorf("internal error: unknown daemon-scope %q", snapApp.DaemonScope) - } // collect all services for a single call to systemctl extra := len(snapApp.Sockets) @@ -414,15 +492,11 @@ serviceNames = append(serviceNames, timerUnit) } - // sysd.Status() makes sure that we get only the units we asked - // for and raises an error otherwise - sts, err := sysd.Status(serviceNames) + sts, err := sd.queryServiceStatus(snapApp.DaemonScope, serviceNames) if err != nil { return fmt.Errorf("cannot get status of services of app %q: %v", appInfo.Name, err) } - if len(sts) != len(serviceNames) { - return fmt.Errorf("cannot get status of services of app %q: expected %d results, got %d", appInfo.Name, len(serviceNames), len(sts)) - } + for _, st := range sts { switch filepath.Ext(st.Name) { case ".service": diff -Nru snapd-2.62+23.10/overlord/servicestate/servicestate_test.go snapd-2.63+23.10/overlord/servicestate/servicestate_test.go --- snapd-2.62+23.10/overlord/servicestate/servicestate_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/servicestate/servicestate_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,10 +21,10 @@ import ( "bytes" + "context" "encoding/json" "fmt" "io" - "io/ioutil" "os" "os/user" "path/filepath" @@ -46,16 +46,41 @@ "github.com/snapcore/snapd/snap/snaptest" "github.com/snapcore/snapd/systemd" "github.com/snapcore/snapd/testutil" + "github.com/snapcore/snapd/usersession/agent" "github.com/snapcore/snapd/wrappers" ) -type statusDecoratorSuite struct{} +type statusDecoratorSuite struct { + testutil.DBusTest + tempdir string + agent *agent.SessionAgent +} var _ = Suite(&statusDecoratorSuite{}) +func (s *statusDecoratorSuite) SetUpTest(c *C) { + s.DBusTest.SetUpTest(c) + s.tempdir = c.MkDir() + dirs.SetRootDir(s.tempdir) + + xdgRuntimeDir := fmt.Sprintf("%s/%d", dirs.XdgRuntimeDirBase, os.Getuid()) + err := os.MkdirAll(xdgRuntimeDir, 0700) + c.Assert(err, IsNil) + s.agent, err = agent.New() + c.Assert(err, IsNil) + s.agent.Start() +} + +func (s *statusDecoratorSuite) TearDownTest(c *C) { + if s.agent != nil { + err := s.agent.Stop() + c.Check(err, IsNil) + } + dirs.SetRootDir("") + s.DBusTest.TearDownTest(c) +} + func (s *statusDecoratorSuite) TestDecorateWithStatus(c *C) { - dirs.SetRootDir(c.MkDir()) - defer dirs.SetRootDir("") snp := &snap.Info{ SideInfo: snap.SideInfo{ RealName: "foo", @@ -229,7 +254,8 @@ {Name: "org.example.Svc", Type: "dbus", Active: true, Enabled: true}, }) - // No state is currently extracted for user daemons + // When using a decorator without any uid provided, the global status is + // fetched, which is only enablement app = &client.AppInfo{ Snap: snp.InstanceName(), Name: "svc", @@ -276,6 +302,114 @@ } } +func (s *statusDecoratorSuite) TestUserServiceDecorateWithStatus(c *C) { + snp := &snap.Info{ + SideInfo: snap.SideInfo{ + RealName: "foo", + Revision: snap.R(1), + }, + } + err := os.MkdirAll(snp.MountDir(), 0755) + c.Assert(err, IsNil) + err = os.Symlink(snp.Revision.String(), filepath.Join(filepath.Dir(snp.MountDir()), "current")) + c.Assert(err, IsNil) + + disabled := false + r := systemd.MockSystemctl(func(args ...string) (buf []byte, err error) { + c.Check(args[0], Equals, "--user") + c.Check(args[1], Equals, "show") + unit := args[3] + + activeState, unitState := "active", "enabled" + if disabled { + 
activeState = "inactive" + unitState = "disabled" + } + + if strings.HasSuffix(unit, ".timer") || strings.HasSuffix(unit, ".socket") || strings.HasSuffix(unit, ".target") { + // Units using the baseProperties query + return []byte(fmt.Sprintf(`Id=%s +Names=%[1]s +ActiveState=%s +UnitFileState=%s +`, unit, activeState, unitState)), nil + } else { + // Units using the extendedProperties query + return []byte(fmt.Sprintf(`Id=%s +Names=%[1]s +Type=simple +ActiveState=%s +UnitFileState=%s +NeedDaemonReload=no +`, unit, activeState, unitState)), nil + } + }) + defer r() + + curr, err := user.Current() + c.Assert(err, IsNil) + + sd := servicestate.NewStatusDecoratorForUid(nil, context.Background(), curr.Uid) + + // not a service + app := &client.AppInfo{ + Snap: "foo", + Name: "app", + } + snapApp := &snap.AppInfo{Snap: snp, Name: "app"} + + err = sd.DecorateWithStatus(app, snapApp) + c.Assert(err, IsNil) + + for _, enabled := range []bool{true, false} { + disabled = !enabled + + app = &client.AppInfo{ + Snap: snp.InstanceName(), + Name: "svc", + Daemon: "simple", + } + snapApp = &snap.AppInfo{ + Snap: snp, + Name: "svc", + Daemon: "simple", + DaemonScope: snap.UserDaemon, + } + snapApp.Sockets = map[string]*snap.SocketInfo{ + "socket1": { + App: snapApp, + Name: "socket1", + ListenStream: "a.socket", + }, + } + snapApp.Timer = &snap.TimerInfo{ + App: snapApp, + Timer: "10:00", + } + snapApp.ActivatesOn = []*snap.SlotInfo{ + { + Snap: snp, + Name: "dbus-slot", + Interface: "dbus", + Attrs: map[string]interface{}{ + "bus": "session", + "name": "org.example.Svc", + }, + }, + } + + err = sd.DecorateWithStatus(app, snapApp) + c.Assert(err, IsNil) + c.Check(app.Active, Equals, enabled) + c.Check(app.Enabled, Equals, true) // when a service is slot activated its always enabled + c.Check(app.Activators, DeepEquals, []client.AppActivator{ + {Name: "socket1", Type: "socket", Active: enabled, Enabled: enabled}, + {Name: "svc", Type: "timer", Active: enabled, Enabled: enabled}, + {Name: "org.example.Svc", Type: "dbus", Active: true, Enabled: true}, + }) + } +} + type instructionSuite struct { rootUser *user.User defaultUser *user.User @@ -820,7 +954,7 @@ c.Check(n, Equals, 100) c.Check(follow, Equals, false) c.Check(namespaces, Equals, false) - return ioutil.NopCloser(strings.NewReader("")), nil + return io.NopCloser(strings.NewReader("")), nil }) defer restore() @@ -898,7 +1032,7 @@ c.Check(n, Equals, 100) c.Check(follow, Equals, false) c.Check(namespaces, Equals, true) - return ioutil.NopCloser(strings.NewReader("")), nil + return io.NopCloser(strings.NewReader("")), nil }) defer restore() diff -Nru snapd-2.62+23.10/overlord/snapshotstate/backend/backend.go snapd-2.63+23.10/overlord/snapshotstate/backend/backend.go --- snapd-2.62+23.10/overlord/snapshotstate/backend/backend.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapshotstate/backend/backend.go 2024-04-24 00:00:39.000000000 +0000 @@ -1081,6 +1081,16 @@ files = append(files, path.Base(snapshotFile.Name())) } + // SnapshotExporter has se.Close() set as a finalizer, thus when the object + // is no longer referenced, se.Close() (which closes all files) will be + // called automatically after/during a GC pass. We don't know if the caller + // retains a reference to the object (eg. 
for any outstanding calls to some + // of its functions), and the last explicit reference in the code above was + // kept for the purpose of accessing the snapshot files list, which is done + // before the final file is read, so we need to mark object as alive until + // after every file has been read. + runtime.KeepAlive(se) + // write the metadata last, then the client can use that to // validate the archive is complete meta := exportMetadata{ diff -Nru snapd-2.62+23.10/overlord/snapshotstate/backend/reader.go snapd-2.63+23.10/overlord/snapshotstate/backend/reader.go --- snapd-2.62+23.10/overlord/snapshotstate/backend/reader.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapshotstate/backend/reader.go 2024-04-24 00:00:39.000000000 +0000 @@ -27,7 +27,6 @@ "fmt" "hash" "io" - "io/ioutil" "os" "path/filepath" "sort" @@ -126,7 +125,7 @@ reader.Broken = err.Error() return reader, err } - metaHashBuf, err := ioutil.ReadAll(io.TeeReader(metaHashReader, &sz)) + metaHashBuf, err := io.ReadAll(io.TeeReader(metaHashReader, &sz)) if err != nil { reader.Broken = err.Error() return reader, err @@ -294,7 +293,7 @@ } // TODO: have something more atomic in osutil - tempdir, err := ioutil.TempDir(parent, ".snapshot") + tempdir, err := os.MkdirTemp(parent, ".snapshot") if err != nil { return rs, err } diff -Nru snapd-2.62+23.10/overlord/snapshotstate/snapshotstate_test.go snapd-2.63+23.10/overlord/snapshotstate/snapshotstate_test.go --- snapd-2.62+23.10/overlord/snapshotstate/snapshotstate_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapshotstate/snapshotstate_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -26,7 +26,6 @@ "errors" "fmt" "io" - "io/ioutil" "os" "os/exec" "os/user" @@ -1844,7 +1843,7 @@ buf := bytes.NewBufferString(fakeSnapshotData) restore := snapshotstate.MockBackendImport(func(ctx context.Context, id uint64, r io.Reader, flags *backend.ImportFlags) ([]string, error) { - d, err := ioutil.ReadAll(r) + d, err := io.ReadAll(r) c.Assert(err, check.IsNil) c.Check(fakeSnapshotData, check.Equals, string(d)) return fakeSnapNames, nil diff -Nru snapd-2.62+23.10/overlord/snapstate/autorefresh.go snapd-2.63+23.10/overlord/snapstate/autorefresh.go --- snapd-2.62+23.10/overlord/snapstate/autorefresh.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/autorefresh.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2017-2023 Canonical Ltd + * Copyright (C) 2017-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -20,12 +20,15 @@ package snapstate import ( + "bytes" "context" "errors" "fmt" "os" + "strings" "time" + "github.com/snapcore/snapd/features" "github.com/snapcore/snapd/httputil" "github.com/snapcore/snapd/i18n" "github.com/snapcore/snapd/logger" @@ -739,8 +742,19 @@ // maybeAsyncPendingRefreshNotification broadcasts desktop notification in a goroutine. // // The notification is sent only if no snap has the marker "snap-refresh-observe" -// interface connected. +// interface connected and the "refresh-app-awareness-ux" experimental flag is disabled. 
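// Condensed sketch of how a pending refresh is surfaced after this change;
// flagEnabled and markerConnected are illustrative inputs (the real code reads
// features.RefreshAppAwarenessUX via features.Flag on a config transaction and
// checks for an active snap-refresh-observe connection).
func exampleRefreshNotificationRoute(flagEnabled, markerConnected bool) string {
	switch {
	case flagEnabled:
		// notices plus the warnings fallback below take over
		return "notices + warnings fallback"
	case markerConnected:
		// a connected snap-refresh-observe client handles the UX
		return "skip, marker snap connected"
	default:
		return "direct desktop notification"
	}
}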
func maybeAsyncPendingRefreshNotification(ctx context.Context, st *state.State, refreshInfo *userclient.PendingSnapRefreshInfo) { + tr := config.NewTransaction(st) + experimentalRefreshAppAwarenessUX, err := features.Flag(tr, features.RefreshAppAwarenessUX) + if err != nil && !config.IsNoOption(err) { + logger.Noticef("Cannot send notification about pending refresh: %v", err) + return + } + if experimentalRefreshAppAwarenessUX { + // use notices + warnings fallback flow instead + return + } + markerExists, err := HasActiveConnection(st, "snap-refresh-observe") if err != nil { logger.Noticef("Cannot send notification about pending refresh: %v", err) @@ -880,6 +894,86 @@ st.Set("last-recorded-inhibited-snaps", curInhibitedSnaps) } + if err := maybeAddRefreshInhibitWarningFallback(st, curInhibitedSnaps); err != nil { + logger.Noticef("Cannot add refresh inhibition warning: %v", err) + } + + return nil +} + +// maybeAddRefreshInhibitWarningFallback records a warning if the set of +// inhibited snaps was changed since the last notice. +// +// The warning is recorded only if: +// 1. There is at least 1 inhibited snap. +// 2. The "refresh-app-awareness-ux" experimental flag is enabled. +// 3. No snap exists with the marker "snap-refresh-observe" interface connected. +// +// Note: If no snaps are inhibited then existing inhibition warning +// will be removed. +func maybeAddRefreshInhibitWarningFallback(st *state.State, inhibitedSnaps map[string]bool) error { + if len(inhibitedSnaps) == 0 { + // no more inhibited snaps, remove inhibition warning if it exists. + return removeRefreshInhibitWarning(st) + } + + tr := config.NewTransaction(st) + experimentalRefreshAppAwarenessUX, err := features.Flag(tr, features.RefreshAppAwarenessUX) + if err != nil && !config.IsNoOption(err) { + return err + } + if !experimentalRefreshAppAwarenessUX { + // snapd will send notifications directly, check maybeAsyncPendingRefreshNotification + return nil + } + + markerExists, err := HasActiveConnection(st, "snap-refresh-observe") + if err != nil { + return err + } + if markerExists { + // do nothing + return nil + } + + // let's fallback to issuing warnings if no snap exists with the + // marker snap-refresh-observe interface connected. + + // remove inhibition warning if it exists. + if err := removeRefreshInhibitWarning(st); err != nil { + return err + } + + // building warning message + var snapsBuf bytes.Buffer + i := 0 + for snap := range inhibitedSnaps { + if i > 0 { + snapsBuf.WriteString(", ") + } + snapsBuf.WriteString(snap) + i++ + } + message := fmt.Sprintf("cannot refresh (%s) due running apps; close running apps to continue refresh.", snapsBuf.String()) + + // wait some time before showing the same warning to the user again after okaying. + st.AddWarning(message, &state.AddWarningOptions{RepeatAfter: 24 * time.Hour}) + + return nil +} + +// removeRefreshInhibitWarning removes inhibition warning if it exists. +func removeRefreshInhibitWarning(st *state.State) error { + // XXX: is it worth it to check for unexpected multiple matches? 
+ for _, warning := range st.AllWarnings() { + if !strings.HasSuffix(warning.String(), "close running apps to continue refresh.") { + continue + } + if err := st.RemoveWarning(warning.String()); err != nil && !errors.Is(err, state.ErrNoState) { + return err + } + return nil + } return nil } diff -Nru snapd-2.62+23.10/overlord/snapstate/autorefresh_test.go snapd-2.63+23.10/overlord/snapstate/autorefresh_test.go --- snapd-2.62+23.10/overlord/snapstate/autorefresh_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/autorefresh_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2017-2022 Canonical Ltd + * Copyright (C) 2017-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -21,6 +21,7 @@ import ( "context" + "encoding/json" "errors" "fmt" "os" @@ -1262,19 +1263,37 @@ s.state.Unlock() } -func (s *autoRefreshTestSuite) TestMaybeAddRefreshInhibitNotice(c *C) { +func (s *autoRefreshTestSuite) testMaybeAddRefreshInhibitNotice(c *C, markerInterfaceConnected bool, warningFallback bool) { st := s.state st.Lock() defer st.Unlock() + var connCheckCalled int + restore := snapstate.MockHasActiveConnection(func(st *state.State, iface string) (bool, error) { + connCheckCalled++ + c.Check(iface, Equals, "snap-refresh-observe") + return markerInterfaceConnected, nil + }) + defer restore() + + // let's add some random warnings + st.Warnf("this is a random warning 1") + st.Warnf("this is a random warning 2") + err := snapstate.MaybeAddRefreshInhibitNotice(st) c.Assert(err, IsNil) - // empty set of inhibited snaps unchanged -> [], no notice recorded + // empty set of inhibited snaps unchanged -> [] + // no notice recorded c.Assert(st.Notices(nil), HasLen, 0) + // no "refresh inhibition" warnings recorded + checkNoRefreshInhibitWarning(c, st) // Verify list is empty checkLastRecordedInhibitedSnaps(c, st, nil) now := time.Now() + warningTime := now + // mock time to determine if recorded warning is recent + defer state.MockTime(warningTime)() snapstate.Set(s.state, "some-snap", &snapstate.SnapState{ Active: true, Sequence: snapstatetest.NewSequenceFromSnapSideInfos([]*snap.SideInfo{ @@ -1286,14 +1305,33 @@ }) err = snapstate.MaybeAddRefreshInhibitNotice(st) c.Assert(err, IsNil) - // set of inhibited snaps changed -> ["some-snap"], notice recorded - checkRefreshInhibitNotice(c, st, 1) + // set of inhibited snaps changed -> ["some-snap"] + // notice recorded + expectedOccurrances := 1 + checkRefreshInhibitNotice(c, st, expectedOccurrances) + // check warnings fallback + if warningFallback { + checkRefreshInhibitWarning(c, st, []string{"some-snap"}, warningTime) + } else { + checkNoRefreshInhibitWarning(c, st) + } + // check that the set of last recorded inhibited snaps is persisted checkLastRecordedInhibitedSnaps(c, st, []string{"some-snap"}) + // mock time to determine if recorded warning is recent + warningTime = warningTime.Add(1 * time.Hour) + defer state.MockTime(warningTime)() err = snapstate.MaybeAddRefreshInhibitNotice(st) c.Assert(err, IsNil) - // set of inhibited snaps unchanged -> ["some-snap"], no notice recorded - checkRefreshInhibitNotice(c, st, 1) + // set of inhibited snaps unchanged -> ["some-snap"] + // no new notice recorded + checkRefreshInhibitNotice(c, st, expectedOccurrances) + // check warnings fallback + if warningFallback { + checkRefreshInhibitWarning(c, st, 
[]string{"some-snap"}, warningTime) + } else { + checkNoRefreshInhibitWarning(c, st) + } checkLastRecordedInhibitedSnaps(c, st, []string{"some-snap"}) // mark "some-snap" as not inhibited @@ -1316,14 +1354,219 @@ SnapType: string(snap.TypeApp), RefreshInhibitedTime: &now, }) + // mock time to determine if recorded warning is recent + warningTime = warningTime.Add(1 * time.Hour) + defer state.MockTime(warningTime)() err = snapstate.MaybeAddRefreshInhibitNotice(st) c.Assert(err, IsNil) - // set of inhibited snaps changed -> ["some-other-snap"], notice recorded - checkRefreshInhibitNotice(c, st, 2) + // set of inhibited snaps changed -> ["some-other-snap"] + // notice recorded + expectedOccurrances++ + checkRefreshInhibitNotice(c, st, expectedOccurrances) + // check warnings fallback + if warningFallback { + checkRefreshInhibitWarning(c, st, []string{"some-other-snap"}, warningTime) + } else { + checkNoRefreshInhibitWarning(c, st) + } checkLastRecordedInhibitedSnaps(c, st, []string{"some-other-snap"}) + + // mark "some-other-snap" as not inhibited + snapstate.Set(s.state, "some-other-snap", &snapstate.SnapState{ + Active: true, + Sequence: snapstatetest.NewSequenceFromSnapSideInfos([]*snap.SideInfo{ + {RealName: "some-other-snap", SnapID: "some-other-snap-id", Revision: snap.R(1)}, + }), + Current: snap.R(1), + SnapType: string(snap.TypeApp), + RefreshInhibitedTime: nil, + }) + // mock time to determine if recorded warning is recent + warningTime = warningTime.Add(1 * time.Hour) + defer state.MockTime(warningTime)() + err = snapstate.MaybeAddRefreshInhibitNotice(st) + c.Assert(err, IsNil) + // set of inhibited snaps changed -> [] + // notice recorded + expectedOccurrances++ + checkRefreshInhibitNotice(c, st, expectedOccurrances) + // no warning should be recorded and existing warning should be + // removed if inhibited snaps set is empty + checkNoRefreshInhibitWarning(c, st) + checkLastRecordedInhibitedSnaps(c, st, []string{}) + + // exercise multiple snaps inhibited + // mark "some-snap" as inhibited + snapstate.Set(s.state, "some-snap", &snapstate.SnapState{ + Active: true, + Sequence: snapstatetest.NewSequenceFromSnapSideInfos([]*snap.SideInfo{ + {RealName: "some-snap", SnapID: "some-snap-id", Revision: snap.R(1)}, + }), + Current: snap.R(1), + SnapType: string(snap.TypeApp), + RefreshInhibitedTime: &now, + }) + // mark "some-other-snap" as inhibited + snapstate.Set(s.state, "some-other-snap", &snapstate.SnapState{ + Active: true, + Sequence: snapstatetest.NewSequenceFromSnapSideInfos([]*snap.SideInfo{ + {RealName: "some-other-snap", SnapID: "some-other-snap-id", Revision: snap.R(1)}, + }), + Current: snap.R(1), + SnapType: string(snap.TypeApp), + RefreshInhibitedTime: &now, + }) + // mock time to determine if recorded warning is recent + warningTime = warningTime.Add(1 * time.Hour) + defer state.MockTime(warningTime)() + err = snapstate.MaybeAddRefreshInhibitNotice(st) + c.Assert(err, IsNil) + // set of inhibited snaps changed -> ["some-snap", "some-other-snap"] + // notice recorded + expectedOccurrances++ + checkRefreshInhibitNotice(c, st, expectedOccurrances) + // check warnings fallback + if warningFallback { + checkRefreshInhibitWarning(c, st, []string{"some-snap", "some-other-snap"}, warningTime) + } else { + checkNoRefreshInhibitWarning(c, st) + } + // check that the set of last recorded inhibited snaps is persisted + checkLastRecordedInhibitedSnaps(c, st, []string{"some-snap", "some-other-snap"}) +} + +func (s *autoRefreshTestSuite) TestMaybeAddRefreshInhibitNotice(c *C) { + 
s.enableRefreshAppAwarenessUX() + const markerInterfaceConnected = true + const warningFallback = false + s.testMaybeAddRefreshInhibitNotice(c, markerInterfaceConnected, warningFallback) +} + +func (s *autoRefreshTestSuite) TestMaybeAddRefreshInhibitNoticeWarningFallbackError(c *C) { + st := s.state + st.Lock() + defer st.Unlock() + + logbuf, restore := logger.MockLogger() + defer restore() + + // mark "some-snap" as inhibited + now := time.Now() + snapstate.Set(s.state, "some-snap", &snapstate.SnapState{ + Active: true, + Sequence: snapstatetest.NewSequenceFromSnapSideInfos([]*snap.SideInfo{ + {RealName: "some-snap", SnapID: "some-snap-id", Revision: snap.R(1)}, + }), + Current: snap.R(1), + SnapType: string(snap.TypeApp), + RefreshInhibitedTime: &now, + }) + + // Highly unlikely but just in case + tr := config.NewTransaction(s.state) + tr.Set("core", "experimental.refresh-app-awareness-ux", "trigger-error") + tr.Commit() + + err := snapstate.MaybeAddRefreshInhibitNotice(st) + // warning fallback error is not propagated, only logged + c.Assert(err, IsNil) + // check error is logged + c.Check(logbuf.String(), testutil.Contains, `Cannot add refresh inhibition warning: refresh-app-awareness-ux can only be set to 'true' or 'false', got "trigger-error"`) + // notice recorded + checkRefreshInhibitNotice(c, st, 1) + // no warnings recorded due to error + checkNoRefreshInhibitWarning(c, st) + + restore = snapstate.MockHasActiveConnection(func(st *state.State, iface string) (bool, error) { + return false, fmt.Errorf("boom") + }) + defer restore() + + st.Unlock() + s.enableRefreshAppAwarenessUX() + st.Lock() + err = snapstate.MaybeAddRefreshInhibitNotice(st) + // warning fallback error is not propagated, only logged + c.Assert(err, IsNil) + // check error is logged + c.Check(logbuf.String(), testutil.Contains, "Cannot add refresh inhibition warning: boom") + // set of inhibited snaps unchanged -> ["some-snap"] + // no new notice recorded + checkRefreshInhibitNotice(c, st, 1) + // no warnings recorded due to error + checkNoRefreshInhibitWarning(c, st) +} + +func (s *autoRefreshTestSuite) TestMaybeAddRefreshInhibitNoticeWarningFallback(c *C) { + s.enableRefreshAppAwarenessUX() + const markerInterfaceConnected = false + const warningFallback = true + s.testMaybeAddRefreshInhibitNotice(c, markerInterfaceConnected, warningFallback) +} + +func (s *autoRefreshTestSuite) TestMaybeAddRefreshInhibitNoticeWarningFallbackNoRAAUX(c *C) { + const markerInterfaceConnected = false + const warningFallback = false // because refresh-app-awareness-ux is disabled + s.testMaybeAddRefreshInhibitNotice(c, markerInterfaceConnected, warningFallback) +} + +func (s *autoRefreshTestSuite) enableRefreshAppAwarenessUX() { + s.state.Lock() + tr := config.NewTransaction(s.state) + tr.Set("core", "experimental.refresh-app-awareness-ux", true) + tr.Commit() + s.state.Unlock() +} + +func checkNoRefreshInhibitWarning(c *C, st *state.State) { + for _, warning := range st.AllWarnings() { + if strings.HasSuffix(warning.String(), "close running apps to continue refresh.") { + c.Error("inhibition warning found") + return + } + } +} + +func checkRefreshInhibitWarning(c *C, st *state.State, snaps []string, warningTime time.Time) { + var inhibitionWarning *state.Warning + for _, warning := range st.AllWarnings() { + if !strings.HasSuffix(warning.String(), "close running apps to continue refresh.") { + continue + } + if inhibitionWarning != nil { + c.Errorf("found multiple inhibition warnings") + return + } + inhibitionWarning = warning + } + 
+ // There is always one warning + c.Assert(inhibitionWarning, NotNil) + w := warningToMap(c, inhibitionWarning) + c.Check(w["message"], Matches, "cannot refresh (.*) due running apps; close running apps to continue refresh.") + for _, snap := range snaps { + c.Check(w["message"], testutil.Contains, snap) + } + c.Check(w["repeat-after"], Equals, "24h0m0s") + if !warningTime.IsZero() { + c.Check(w["last-added"], Equals, warningTime.UTC().Format(time.RFC3339Nano)) + } +} + +// warningToMap converts a Warning to a map using a JSON marshal-unmarshal round trip. +func warningToMap(c *C, warning *state.Warning) map[string]any { + buf, err := json.Marshal(warning) + c.Assert(err, IsNil) + var n map[string]any + err = json.Unmarshal(buf, &n) + c.Assert(err, IsNil) + return n } func (s *autoRefreshTestSuite) TestMaybeAsyncPendingRefreshNotification(c *C) { + s.state.Lock() + defer s.state.Unlock() + var connCheckCalled int restore := snapstate.MockHasActiveConnection(func(st *state.State, iface string) (bool, error) { connCheckCalled++ @@ -1344,12 +1587,29 @@ }) defer restore() + tr := config.NewTransaction(s.state) + tr.Set("core", "experimental.refresh-app-awareness-ux", true) + tr.Commit() + snapstate.MaybeAsyncPendingRefreshNotification(context.TODO(), s.state, expectedInfo) + // no notification as refresh-appawareness-ux is enabled + // i.e. notices + warnings fallback is used instead + c.Check(connCheckCalled, Equals, 0) + c.Check(notificationCalled, Equals, 0) + + tr.Set("core", "experimental.refresh-app-awareness-ux", false) + tr.Commit() + + snapstate.MaybeAsyncPendingRefreshNotification(context.TODO(), s.state, expectedInfo) + // notification sent as refresh-appawareness-ux is now disabled c.Check(connCheckCalled, Equals, 1) c.Check(notificationCalled, Equals, 1) } func (s *autoRefreshTestSuite) TestMaybeAsyncPendingRefreshNotificationSkips(c *C) { + s.state.Lock() + defer s.state.Unlock() + var connCheckCalled int restore := snapstate.MockHasActiveConnection(func(st *state.State, iface string) (bool, error) { connCheckCalled++ diff -Nru snapd-2.62+23.10/overlord/snapstate/backend/copydata.go snapd-2.63+23.10/overlord/snapstate/backend/copydata.go --- snapd-2.62+23.10/overlord/snapstate/backend/copydata.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/backend/copydata.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "io/ioutil" "os" "os/user" "path/filepath" @@ -286,7 +285,7 @@ } var removeIfEmpty = func(dir string) error { - files, err := ioutil.ReadDir(dir) + files, err := os.ReadDir(dir) if err != nil { return err } @@ -351,7 +350,7 @@ undoInfo.Created = append(undoInfo.Created, newUserHome) userData := snap.UserDataDir(usr.HomeDir, snapName, rev, opts) - files, err := ioutil.ReadDir(userData) + files, err := os.ReadDir(userData) if err != nil { if errors.Is(err, os.ErrNotExist) { // there's nothing to copy into ~/Snap/ (like on a fresh install) diff -Nru snapd-2.62+23.10/overlord/snapstate/backend/copydata_test.go snapd-2.63+23.10/overlord/snapstate/backend/copydata_test.go --- snapd-2.62+23.10/overlord/snapstate/backend/copydata_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/backend/copydata_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "io/ioutil" "os" "os/user" "path/filepath" @@ -178,16 +177,16 @@ c.Assert(err, IsNil) canaryDataFile := filepath.Join(v1.DataDir(), "canary.txt") - err = ioutil.WriteFile(canaryDataFile, canaryData, 0644) + err 
= os.WriteFile(canaryDataFile, canaryData, 0644) c.Assert(err, IsNil) canaryDataFile = filepath.Join(v1.CommonDataDir(), "canary.common") - err = ioutil.WriteFile(canaryDataFile, canaryData, 0644) + err = os.WriteFile(canaryDataFile, canaryData, 0644) c.Assert(err, IsNil) for i := range snapHomeDataDirs { - err = ioutil.WriteFile(filepath.Join(snapHomeDataDirs[i], "canary.home"), canaryData, 0644) + err = os.WriteFile(filepath.Join(snapHomeDataDirs[i], "canary.home"), canaryData, 0644) c.Assert(err, IsNil) - err = ioutil.WriteFile(filepath.Join(snapHomeCommonDirs[i], "canary.common_home"), canaryData, 0644) + err = os.WriteFile(filepath.Join(snapHomeCommonDirs[i], "canary.common_home"), canaryData, 0644) c.Assert(err, IsNil) } @@ -263,7 +262,7 @@ } func (s *copydataSuite) populatedData(d string) string { - bs, err := ioutil.ReadFile(filepath.Join(dirs.SnapDataDir, "hello", d, "random-subdir", "canary")) + bs, err := os.ReadFile(filepath.Join(dirs.SnapDataDir, "hello", d, "random-subdir", "canary")) if err == nil { return string(bs) } @@ -797,13 +796,13 @@ // check versioned file was moved opts := &dirs.SnapDirOptions{HiddenSnapDataDir: true} revFile := filepath.Join(info.UserDataDir(homedir, opts), "canary.home") - data, err := ioutil.ReadFile(revFile) + data, err := os.ReadFile(revFile) c.Assert(err, IsNil) c.Assert(data, DeepEquals, []byte("10\n")) // check common file was moved commonFile := filepath.Join(info.UserCommonDataDir(homedir, opts), "file.txt") - data, err = ioutil.ReadFile(commonFile) + data, err = os.ReadFile(commonFile) c.Assert(err, IsNil) c.Assert(data, DeepEquals, []byte("some content")) @@ -894,7 +893,7 @@ c.Assert(s.be.HideSnapData("hello"), IsNil) // check versioned file was moved and previous contents were overwritten - data, err := ioutil.ReadFile(revFile) + data, err := os.ReadFile(revFile) c.Assert(err, IsNil) c.Assert(data, DeepEquals, []byte("10\n")) @@ -949,13 +948,13 @@ // check versioned file was restored revFile := filepath.Join(info.UserDataDir(homedir, nil), "file.txt") - data, err := ioutil.ReadFile(revFile) + data, err := os.ReadFile(revFile) c.Assert(err, IsNil) c.Assert(data, DeepEquals, []byte("some content")) // check common file was restored commonFile := filepath.Join(info.UserCommonDataDir(homedir, nil), "file.txt") - data, err = ioutil.ReadFile(commonFile) + data, err = os.ReadFile(commonFile) c.Assert(err, IsNil) c.Assert(data, DeepEquals, []byte("other content")) @@ -1069,7 +1068,7 @@ // dir contains a file, shouldn't do anything c.Assert(backend.RemoveIfEmpty(dirs.GlobalRootDir), IsNil) - files, err := ioutil.ReadDir(dirs.GlobalRootDir) + files, err := os.ReadDir(dirs.GlobalRootDir) c.Assert(err, IsNil) c.Check(files, HasLen, 1) c.Check(filepath.Join(dirs.GlobalRootDir, files[0].Name()), testutil.FileEquals, "stuff") @@ -1158,7 +1157,7 @@ c.Check(undoInfo.Created, DeepEquals, []string{exposedHome}) expectedFile := filepath.Join(exposedHome, "file") - data, err := ioutil.ReadFile(expectedFile) + data, err := os.ReadFile(expectedFile) c.Assert(err, IsNil) c.Check(string(data), Equals, "stuff") @@ -1340,7 +1339,7 @@ c.Assert(err, IsNil) c.Check(exists, Equals, true) - entries, err := ioutil.ReadDir(newHomeDir) + entries, err := os.ReadDir(newHomeDir) c.Assert(err, IsNil) c.Check(entries, HasLen, 0) } @@ -1405,7 +1404,7 @@ c.Check(exists, Equals, true) c.Check(isDir, Equals, true) - files, err := ioutil.ReadDir(newHome) + files, err := os.ReadDir(newHome) c.Assert(err, IsNil) c.Check(files, HasLen, 1) c.Check(files[0].Name(), Equals, "file") diff 
-Nru snapd-2.62+23.10/overlord/snapstate/backend/link.go snapd-2.63+23.10/overlord/snapstate/backend/link.go --- snapd-2.62+23.10/overlord/snapstate/backend/link.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/backend/link.go 2024-04-24 00:00:39.000000000 +0000 @@ -25,6 +25,7 @@ "io/fs" "os" "path/filepath" + "strings" "github.com/snapcore/snapd/boot" "github.com/snapcore/snapd/cmd/snaplock/runinhibit" @@ -184,6 +185,32 @@ return rebootInfo, nil } +func componentLinkPath(cpi snap.ContainerPlaceInfo, snapRev snap.Revision) string { + instanceName, compName, _ := strings.Cut(cpi.ContainerName(), "+") + compBase := snap.ComponentsBaseDir(instanceName) + return filepath.Join(compBase, snapRev.String(), compName) +} + +func (b Backend) LinkComponent(cpi snap.ContainerPlaceInfo, snapRev snap.Revision) error { + mountDir := cpi.MountDir() + linkPath := componentLinkPath(cpi, snapRev) + + // Create components directory + compsDir := filepath.Dir(linkPath) + if err := os.MkdirAll(compsDir, 0755); err != nil { + return fmt.Errorf("while linking component: %v", err) + } + + // Work out relative path to go from the dir where the symlink lives to + // the mount dir + linkTarget, err := filepath.Rel(compsDir, mountDir) + if err != nil { + return err + } + + return osutil.AtomicSymlink(linkTarget, linkPath) +} + func (b Backend) StartServices(apps []*snap.AppInfo, disabledSvcs []string, meter progress.Meter, tm timings.Measurer) error { flags := &wrappers.StartServicesFlags{Enable: true} return wrappers.StartServices(apps, disabledSvcs, flags, meter, tm) @@ -366,3 +393,22 @@ return nil } + +func (b Backend) UnlinkComponent(cpi snap.ContainerPlaceInfo, snapRev snap.Revision) error { + linkPath := componentLinkPath(cpi, snapRev) + + err := os.Remove(linkPath) + if err != nil { + if os.IsNotExist(err) { + logger.Noticef("cannot remove symlink %q: %v", linkPath, err) + } else { + return err + } + } + + // Try also to remove the / subdirectory, as this might be + // the only installed component. But simply ignore if not empty. 
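// Worked example of the layout these helpers manage, assuming SnapMountDir is
// /snap and using the names from the tests below (snap "mysnap" rev 2,
// component "mycomp" local rev x2): LinkComponent creates
//
//	/snap/mysnap/components/2/mycomp -> ../mnt/mycomp/x2
//
// and UnlinkComponent removes that symlink, then opportunistically removes
// /snap/mysnap/components/2 if no other component link is left in it.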
+ os.Remove(filepath.Dir(linkPath)) + + return nil +} diff -Nru snapd-2.62+23.10/overlord/snapstate/backend/link_test.go snapd-2.63+23.10/overlord/snapstate/backend/link_test.go --- snapd-2.62+23.10/overlord/snapstate/backend/link_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/backend/link_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -971,3 +971,116 @@ c.Check(readMountTarget, Equals, "oldactivevalue") c.Check(readDataTarget, Equals, "olddatavalue") } + +func (s *linkSuite) TestLinkComponentIdempotent(c *C) { + compName := "mycomp" + compRev := snap.R(-2) + snapName := "mysnap" + snapRev := snap.R(2) + cpi := snap.MinimalComponentContainerPlaceInfo(compName, compRev, snapName) + c.Assert(os.MkdirAll(cpi.MountDir(), 0755), IsNil) + + err := s.be.LinkComponent(cpi, snapRev) + c.Assert(err, IsNil) + err = s.be.LinkComponent(cpi, snapRev) + c.Assert(err, IsNil) + + linkPath := filepath.Join(dirs.SnapMountDir, snapName, + "components", snapRev.String(), compName) + relTarget, err := os.Readlink(linkPath) + c.Assert(relTarget, Equals, filepath.Join("../mnt", compName, compRev.String())) + c.Assert(err, IsNil) + linkTarget, err := filepath.EvalSymlinks(linkPath) + c.Assert(err, IsNil) + c.Assert(linkTarget, Equals, + filepath.Join(snap.ComponentsBaseDir(snapName), "mnt", compName, compRev.String())) +} + +func (s *linkSuite) TestLinkComponentError(c *C) { + compName := "mycomp" + compRev := snap.R(-2) + snapName := "mysnap" + snapRev := snap.R(2) + cpi := snap.MinimalComponentContainerPlaceInfo(compName, compRev, snapName) + c.Assert(os.MkdirAll(cpi.MountDir(), 0755), IsNil) + // Put a regular directory in the link path + linkPath := filepath.Join(dirs.SnapMountDir, snapName, + "components", snapRev.String(), compName) + c.Assert(os.MkdirAll(linkPath, 0755), IsNil) + + err := s.be.LinkComponent(cpi, snapRev) + c.Assert(err, ErrorMatches, `rename .*: file exists`) +} + +func (s *linkSuite) TestUnlinkComponentIdempotent(c *C) { + compName := "mycomp" + compRev := snap.R(-2) + snapName := "mysnap" + snapRev := snap.R(2) + cpi := snap.MinimalComponentContainerPlaceInfo(compName, compRev, snapName) + linkPath := filepath.Join(dirs.SnapMountDir, snapName, + "components", snapRev.String(), compName) + target := filepath.Join("../mnt", compName, compRev.String()) + + c.Assert(os.MkdirAll(cpi.MountDir(), 0755), IsNil) + c.Assert(os.MkdirAll(filepath.Dir(linkPath), 0755), IsNil) + c.Assert(osutil.AtomicSymlink(target, linkPath), IsNil) + + err := s.be.UnlinkComponent(cpi, snapRev) + c.Assert(err, IsNil) + c.Assert(cpi.MountDir(), testutil.FilePresent) + // // should be gone + c.Assert(linkPath, testutil.FileAbsent) + c.Assert(filepath.Dir(linkPath), testutil.FileAbsent) + + err = s.be.UnlinkComponent(cpi, snapRev) + c.Assert(err, IsNil) +} + +func (s *linkSuite) TestUnlinkTwoComponents(c *C) { + compName := "mycomp" + compRev := snap.R(-2) + snapName := "mysnap" + snapRev := snap.R(2) + cpi := snap.MinimalComponentContainerPlaceInfo(compName, compRev, snapName) + compRevPath := filepath.Join(dirs.SnapMountDir, snapName, + "components", snapRev.String()) + linkPath := filepath.Join(compRevPath, compName) + target := filepath.Join("../mnt", compName, compRev.String()) + + c.Assert(os.MkdirAll(cpi.MountDir(), 0755), IsNil) + c.Assert(os.MkdirAll(filepath.Dir(linkPath), 0755), IsNil) + c.Assert(osutil.AtomicSymlink(target, linkPath), IsNil) + + // Simulate another component installed (dangling symlink, but + // that does not matter) + target2 := filepath.Join("../mnt", 
"other-comp", "1") + c.Assert(osutil.AtomicSymlink(target2, + filepath.Join(compRevPath, "other-comp")), IsNil) + + err := s.be.UnlinkComponent(cpi, snapRev) + c.Assert(err, IsNil) + c.Assert(cpi.MountDir(), testutil.FilePresent) + // Only last subdir of // should be gone + c.Assert(linkPath, testutil.FileAbsent) + c.Assert(filepath.Dir(linkPath), testutil.FilePresent) + + err = s.be.UnlinkComponent(cpi, snapRev) + c.Assert(err, IsNil) +} + +func (s *linkSuite) TestUnlinkComponentError(c *C) { + compName := "mycomp" + compRev := snap.R(-2) + snapName := "mysnap" + snapRev := snap.R(2) + cpi := snap.MinimalComponentContainerPlaceInfo(compName, compRev, snapName) + c.Assert(os.MkdirAll(cpi.MountDir(), 0755), IsNil) + // Put a regular directory inside the link path + insideLinkPath := filepath.Join(dirs.SnapMountDir, snapName, + "components", snapRev.String(), compName, "xx") + c.Assert(os.MkdirAll(insideLinkPath, 0755), IsNil) + + err := s.be.UnlinkComponent(cpi, snapRev) + c.Assert(err, ErrorMatches, `remove .*: directory not empty`) +} diff -Nru snapd-2.62+23.10/overlord/snapstate/backend/mountunit.go snapd-2.63+23.10/overlord/snapstate/backend/mountunit.go --- snapd-2.62+23.10/overlord/snapstate/backend/mountunit.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/backend/mountunit.go 2024-04-24 00:00:39.000000000 +0000 @@ -26,18 +26,12 @@ "github.com/snapcore/snapd/systemd" ) -func addMountUnit(c snap.ContainerPlaceInfo, preseed bool, meter progress.Meter) error { +func addMountUnit(c snap.ContainerPlaceInfo, mountFlags systemd.EnsureMountUnitFlags, sysd systemd.Systemd) error { squashfsPath := dirs.StripRootDir(c.MountFile()) whereDir := dirs.StripRootDir(c.MountDir()) - var sysd systemd.Systemd - if preseed { - sysd = systemd.NewEmulationMode(dirs.GlobalRootDir) - } else { - sysd = systemd.New(systemd.SystemMode, meter) - } _, err := sysd.EnsureMountUnitFile(c.MountDescription(), squashfsPath, whereDir, "squashfs", - systemd.EnsureMountUnitFlags{}) + mountFlags) return err } diff -Nru snapd-2.62+23.10/overlord/snapstate/backend/mountunit_test.go snapd-2.63+23.10/overlord/snapstate/backend/mountunit_test.go --- snapd-2.62+23.10/overlord/snapstate/backend/mountunit_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/backend/mountunit_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -35,6 +35,7 @@ type ParamsForEnsureMountUnitFile struct { description, what, where, fstype string + flags systemd.EnsureMountUnitFlags } type ResultForEnsureMountUnitFile struct { @@ -57,7 +58,7 @@ func (s *FakeSystemd) EnsureMountUnitFile(description, what, where, fstype string, flags systemd.EnsureMountUnitFlags) (string, error) { s.EnsureMountUnitFileCalls = append(s.EnsureMountUnitFileCalls, - ParamsForEnsureMountUnitFile{description, what, where, fstype}) + ParamsForEnsureMountUnitFile{description, what, where, fstype, flags}) return s.EnsureMountUnitFileResult.path, s.EnsureMountUnitFileResult.err } @@ -96,6 +97,14 @@ } func (s *mountunitSuite) TestAddMountUnit(c *C) { + s.testAddMountUnit(c, systemd.EnsureMountUnitFlags{}) +} + +func (s *mountunitSuite) TestAddBeforeDriversMountUnit(c *C) { + s.testAddMountUnit(c, systemd.EnsureMountUnitFlags{StartBeforeDriversLoad: true}) +} + +func (s *mountunitSuite) testAddMountUnit(c *C, flags systemd.EnsureMountUnitFlags) { expectedErr := errors.New("creation error") var sysd *FakeSystemd @@ -114,7 +123,7 @@ Version: "1.1", Architectures: []string{"all"}, } - err := backend.AddMountUnit(info, false, 
progress.Null) + err := backend.AddMountUnit(info, flags, systemd.New(systemd.SystemMode, progress.Null)) c.Check(err, Equals, expectedErr) // ensure correct parameters @@ -123,6 +132,7 @@ what: "/var/lib/snapd/snaps/foo_13.snap", where: fmt.Sprintf("%s/foo/13", dirs.StripRootDir(dirs.SnapMountDir)), fstype: "squashfs", + flags: flags, } c.Check(sysd.EnsureMountUnitFileCalls, DeepEquals, []ParamsForEnsureMountUnitFile{ expectedParameters, diff -Nru snapd-2.62+23.10/overlord/snapstate/backend/setup.go snapd-2.63+23.10/overlord/snapstate/backend/setup.go --- snapd-2.62+23.10/overlord/snapstate/backend/setup.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/backend/setup.go 2024-04-24 00:00:39.000000000 +0000 @@ -28,9 +28,11 @@ "github.com/snapcore/snapd/cmd/snaplock/runinhibit" "github.com/snapcore/snapd/dirs" "github.com/snapcore/snapd/kernel" + "github.com/snapcore/snapd/logger" "github.com/snapcore/snapd/osutil" "github.com/snapcore/snapd/progress" "github.com/snapcore/snapd/snap" + "github.com/snapcore/snapd/snap/naming" "github.com/snapcore/snapd/systemd" ) @@ -99,11 +101,17 @@ } // generate the mount unit for the squashfs - if err := addMountUnit(s, b.preseed, meter); err != nil { + t := s.Type() + mountFlags := systemd.EnsureMountUnitFlags{ + PreventRestartIfModified: false, + // We need early mounts only for UC20+/hybrid, also 16.04 + // systemd seems to be buggy if we enable this. + StartBeforeDriversLoad: t == snap.TypeKernel && dev.HasModeenv(), + } + if err := addMountUnit(s, mountFlags, newSystemd(b.preseed, meter)); err != nil { return snapType, nil, err } - t := s.Type() if !setupOpts.SkipKernelExtraction { if err := boot.Kernel(s, t, dev).ExtractKernelAssets(snapf); err != nil { return snapType, nil, fmt.Errorf("cannot install kernel: %s", err) @@ -116,67 +124,21 @@ // SetupKernelSnap does extra configuration for kernel snaps. 
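+// It builds the kernel drivers tree with kernel.EnsureKernelDriversTree,
+// using the snap's regular mount directory (cpi.MountDir()) as the source;
+// no separate early mount unit is created by this function.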
func (b Backend) SetupKernelSnap(instanceName string, rev snap.Revision, meter progress.Meter) (err error) { - var sysd systemd.Systemd - if b.preseed { - sysd = systemd.NewEmulationMode(dirs.GlobalRootDir) - } else { - sysd = systemd.New(systemd.SystemMode, meter) - } - - // Create early mount for the snap - cpi := snap.MinimalSnapContainerPlaceInfo(instanceName, rev) - - earlyMountDir := kernel.EarlyKernelMountDir(instanceName, rev) - addMountUnitOptions := &systemd.MountUnitOptions{ - MountUnitType: systemd.BeforeDriversLoadMountUnit, - Lifetime: systemd.Persistent, - Description: "Early mount unit for kernel snap", - What: cpi.MountFile(), - Where: dirs.StripRootDir(earlyMountDir), - Fstype: "squashfs", - Options: []string{"nodev,ro,x-gdu.hide,x-gvfs-hide"}, - } - _, err = sysd.EnsureMountUnitFileWithOptions(addMountUnitOptions) - if err != nil { - return fmt.Errorf("cannot create mount in %q: %w", earlyMountDir, err) - } - - // Clean-up mount if the kernel tree cannot be built - defer func() { - if err == nil { - return - } - if e := sysd.RemoveMountUnitFile(earlyMountDir); e != nil { - meter.Notify(fmt.Sprintf("while trying to clean up due to previous failure: %v", e)) - } - }() - // Build kernel tree that will be mounted from initramfs + cpi := snap.MinimalSnapContainerPlaceInfo(instanceName, rev) return kernel.EnsureKernelDriversTree(instanceName, rev, - earlyMountDir, nil, &kernel.KernelDriversTreeOptions{KernelInstall: true}) + cpi.MountDir(), nil, &kernel.KernelDriversTreeOptions{KernelInstall: true}) } func (b Backend) RemoveKernelSnapSetup(instanceName string, rev snap.Revision, meter progress.Meter) error { - if err := kernel.RemoveKernelDriversTree(instanceName, rev); err != nil { - return err - } - - var sysd systemd.Systemd - if b.preseed { - sysd = systemd.NewEmulationMode(dirs.GlobalRootDir) - } else { - sysd = systemd.New(systemd.SystemMode, meter) - } - - earlyMountDir := kernel.EarlyKernelMountDir(instanceName, rev) - return sysd.RemoveMountUnitFile(earlyMountDir) + return kernel.RemoveKernelDriversTree(instanceName, rev) } // SetupComponent prepares and mounts a component for further processing. func (b Backend) SetupComponent(compFilePath string, compPi snap.ContainerPlaceInfo, dev snap.Device, meter progress.Meter) (installRecord *InstallRecord, err error) { // This assumes that the component was already verified or --dangerous was used. - _, snapf, oErr := OpenComponentFile(compFilePath) + compInfo, snapf, oErr := OpenComponentFile(compFilePath) if oErr != nil { return nil, oErr } @@ -214,7 +176,13 @@ } // generate the mount unit for the squashfs - if err := addMountUnit(compPi, b.preseed, meter); err != nil { + mountFlags := systemd.EnsureMountUnitFlags{ + PreventRestartIfModified: false, + // We need early mounts only for UC20+/hybrid, also 16.04 + // systemd seems to be buggy if we enable this. + StartBeforeDriversLoad: compInfo.Type == snap.KernelModulesComponent && dev.HasModeenv(), + } + if err := addMountUnit(compPi, mountFlags, newSystemd(b.preseed, meter)); err != nil { return nil, err } @@ -264,7 +232,7 @@ return err } - // Remove /snap//components// + // Remove /snap//components/mnt// if err := os.RemoveAll(cpi.MountDir()); err != nil { return err } @@ -277,10 +245,6 @@ } } - // TODO should we check here if there are other components installed - // for this snap revision or for other revisions and if not delete - // / and maybe also components//? 
- return nil } @@ -303,11 +267,17 @@ func (b Backend) RemoveComponentDir(cpi snap.ContainerPlaceInfo) error { compMountDir := cpi.MountDir() - // Remove /snap//components// - os.Remove(compMountDir) - // and /snap//components/ (might fail - // if there are other components installed for this revision) - os.Remove(filepath.Dir(compMountDir)) + // Remove last 3 directories of + // /snap//components/mnt// if they + // are empty (last one should be). Note that subdirectories with snap + // revisions are handled by UnlinkComponent. + for i := 0; i < 3; i++ { + compMountDir = filepath.Dir(compMountDir) + if err := os.Remove(compMountDir); err != nil { + break + } + } + return nil } @@ -325,3 +295,81 @@ func (b Backend) RemoveSnapInhibitLock(instanceName string) error { return runinhibit.RemoveLockFile(instanceName) } + +// SetupKernelModulesComponents changes kernel-modules configuration by adding +// compsToInstall. The components currently active are currentComps, while +// ksnapName and ksnapRev identify the currently active kernel. +func (b Backend) SetupKernelModulesComponents(compsToInstall, currentComps []*snap.ComponentSideInfo, ksnapName string, ksnapRev snap.Revision, meter progress.Meter) (err error) { + sysd := newSystemd(b.preseed, meter) + + // newActiveComps will contain the new revisions of components, taken from compsToInstall + newActiveComps := mergeCompSideInfosUpdatingRev(currentComps, compsToInstall) + + return moveKModsComponentsState( + currentComps, newActiveComps, ksnapName, ksnapRev, sysd, + "after failure to set-up kernel modules components") +} + +// RemoveKernelModulesComponentsSetup changes kernel-modules configuration by +// removing compsToRemove and making the final state consider only finalComps. +func (b Backend) RemoveKernelModulesComponentsSetup(compsToRemove, finalComps []*snap.ComponentSideInfo, ksnapName string, ksnapRev snap.Revision, meter progress.Meter) (err error) { + sysd := newSystemd(b.preseed, meter) + + // currentActiveComps will contain the current revision, taken from compsToRemove + currentActiveComps := mergeCompSideInfosUpdatingRev(finalComps, compsToRemove) + + return moveKModsComponentsState( + currentActiveComps, finalComps, ksnapName, ksnapRev, sysd, + "after failure to remove set-up of kernel modules components") +} + +// mergeCompSideInfosUpdatingRev returns a merged list from two lists +// of ComponentSideInfo, using the criteria of the elements having the +// same ComponentRef. The rest of the data for an element will come +// from comps2 if ComponentRef is the same in comps1 and comps2, that +// is, the revision is updated in that case. +func mergeCompSideInfosUpdatingRev(comps1, comps2 []*snap.ComponentSideInfo) (merged []*snap.ComponentSideInfo) { + numInComps2 := len(comps2) + comps2Map := make(map[naming.ComponentRef]*snap.ComponentSideInfo, numInComps2) + for _, cti := range comps2 { + comps2Map[cti.Component] = cti + } + merged = append(merged, comps2...) + for _, instComp := range comps1 { + if _, ok := comps2Map[instComp.Component]; !ok { + // Component not in comps2, add + merged = append(merged, instComp) + } + } + + return merged +} + +// moveKModsComponentsState changes kernel-modules set-up from currentComps to +// finalComps, for the kernel/revision specified by ksnapName/ksnapRev. 
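+// If the tree cannot be rebuilt for finalComps, a second call tries to
+// restore it for currentComps; a failed restore is only logged together
+// with cleanErrMsg, and the original error is still returned.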
+func moveKModsComponentsState(currentComps, finalComps []*snap.ComponentSideInfo, ksnapName string, ksnapRev snap.Revision, sysd systemd.Systemd, cleanErrMsg string) (err error) { + cpi := snap.MinimalSnapContainerPlaceInfo(ksnapName, ksnapRev) + if err := kernel.EnsureKernelDriversTree(ksnapName, ksnapRev, + cpi.MountDir(), finalComps, + &kernel.KernelDriversTreeOptions{KernelInstall: false}); err != nil { + + if e := kernel.EnsureKernelDriversTree(ksnapName, ksnapRev, + cpi.MountDir(), + currentComps, + &kernel.KernelDriversTreeOptions{ + KernelInstall: false}); e != nil { + logger.Noticef("while restoring kernel tree %s: %v", cleanErrMsg, e) + } + + return err + } + + return nil +} + +func newSystemd(preseed bool, meter progress.Meter) systemd.Systemd { + if preseed { + return systemd.NewEmulationMode(dirs.GlobalRootDir) + } + return systemd.New(systemd.SystemMode, meter) +} diff -Nru snapd-2.62+23.10/overlord/snapstate/backend/setup_test.go snapd-2.63+23.10/overlord/snapstate/backend/setup_test.go --- snapd-2.62+23.10/overlord/snapstate/backend/setup_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/backend/setup_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,6 +20,7 @@ package backend_test import ( + "errors" "fmt" "os" "path/filepath" @@ -37,6 +38,7 @@ "github.com/snapcore/snapd/overlord/snapstate/backend" "github.com/snapcore/snapd/progress" "github.com/snapcore/snapd/snap" + "github.com/snapcore/snapd/snap/naming" "github.com/snapcore/snapd/snap/snaptest" "github.com/snapcore/snapd/systemd" "github.com/snapcore/snapd/testutil" @@ -69,6 +71,8 @@ }) s.umount = testutil.MockCommand(c, "umount", "") + depmod := testutil.MockCommand(c, "depmod", `echo "depmod default mock" >&2; exit 1`) + s.AddCleanup(func() { depmod.Restore() }) } func (s *setupSuite) TearDownTest(c *C) { @@ -463,7 +467,7 @@ `, snapName, compName) compPath := snaptest.MakeTestComponent(c, componentYaml) - cpi := snap.MinimalComponentContainerPlaceInfo(compName, compRev, instanceName, snapRev) + cpi := snap.MinimalComponentContainerPlaceInfo(compName, compRev, instanceName) installRecord, err := s.be.SetupComponent(compPath, cpi, mockDev, progress.Null) c.Assert(err, IsNil) @@ -476,7 +480,7 @@ // ensure the right unit is created where := filepath.Join(dirs.StripRootDir(dirs.SnapMountDir), - instanceName+"/components/"+snapRev.String()+"/"+compName) + instanceName+"/components/mnt/"+compName+"/"+compRev.String()) mup := systemd.MountUnitPath(where) c.Assert(mup, testutil.FileMatches, fmt.Sprintf("(?ms).*^Where=%s", where)) compBlobPath := "/var/lib/snapd/snaps/" + compFileName @@ -490,7 +494,7 @@ func (s *setupSuite) testSetupComponentUndo(c *C, compName, snapName, instanceName string, compRev, snapRev snap.Revision, installRecord *backend.InstallRecord) { // undo undoes the mount unit and the instdir creation - cpi := snap.MinimalComponentContainerPlaceInfo(compName, compRev, instanceName, snapRev) + cpi := snap.MinimalComponentContainerPlaceInfo(compName, compRev, instanceName) err := s.be.UndoSetupComponent(cpi, installRecord, mockDev, progress.Null) c.Assert(err, IsNil) @@ -514,7 +518,6 @@ func (s *setupSuite) TestSetupComponentCleanupAfterFail(c *C) { snapName := "mysnap" compName := "mycomp" - snapRev := snap.R(11) compRev := snap.R(33) componentYaml := fmt.Sprintf(`component: %s+%s @@ -524,7 +527,7 @@ compPath := snaptest.MakeTestComponent(c, componentYaml) - cpi := snap.MinimalComponentContainerPlaceInfo(compName, compRev, snapName, snapRev) + cpi := 
snap.MinimalComponentContainerPlaceInfo(compName, compRev, snapName) r := systemd.MockSystemctl(func(cmd ...string) ([]byte, error) { // mount unit start fails @@ -552,7 +555,7 @@ compRev := snap.R(33) compName := "mycomp" snapInstance := "mysnap_inst" - cpi := snap.MinimalComponentContainerPlaceInfo(compName, compRev, snapInstance, snapRev) + cpi := snap.MinimalComponentContainerPlaceInfo(compName, compRev, snapInstance) installRecord := s.testSetupComponentDo(c, compName, "mysnap", snapInstance, compRev, snapRev) @@ -565,8 +568,39 @@ err = s.be.RemoveComponentDir(cpi) c.Assert(err, IsNil) - // Directory for the snap revision should be gone - c.Assert(osutil.FileExists(filepath.Dir(cpi.MountDir())), Equals, false) + // Directories components/mnt// should be gone + compDir := filepath.Dir(cpi.MountDir()) + mntDir := filepath.Dir(compDir) + compsDir := filepath.Dir(mntDir) + c.Assert(osutil.FileExists(compDir), Equals, false) + c.Assert(osutil.FileExists(mntDir), Equals, false) + c.Assert(osutil.FileExists(compsDir), Equals, false) +} + +func (s *setupSuite) TestSetupComponentFilesDirNotRemoved(c *C) { + snapRev := snap.R(11) + compRev := snap.R(33) + secondCompRev := snap.R(55) + compName := "mycomp" + snapInstance := "mysnap_inst" + cpi := snap.MinimalComponentContainerPlaceInfo(compName, compRev, snapInstance) + + installRecord := s.testSetupComponentDo(c, compName, "mysnap", snapInstance, compRev, snapRev) + s.testSetupComponentDo(c, compName, "mysnap", snapInstance, secondCompRev, snapRev) + + err := s.be.RemoveComponentFiles(cpi, installRecord, mockDev, progress.Null) + c.Assert(err, IsNil) + l, _ := filepath.Glob(filepath.Join(dirs.SnapServicesDir, "*.mount")) + // Still a mount file for the second component + c.Assert(l, HasLen, 1) + c.Assert(osutil.FileExists(cpi.MountDir()), Equals, false) + c.Assert(osutil.FileExists(cpi.MountFile()), Equals, false) + + err = s.be.RemoveComponentDir(cpi) + c.Assert(err, IsNil) + // Directory components/mnt// should be still around + compDir := filepath.Dir(cpi.MountDir()) + c.Assert(osutil.FileExists(compDir), Equals, true) } func (s *setupSuite) TestSetupAndRemoveKernelSnapSetup(c *C) { @@ -578,7 +612,7 @@ defer os.Unsetenv("SNAPPY_SQUASHFS_UNPACK_FOR_TESTS") // Files from the early-mounted snap - snapdir := filepath.Join(dirs.GlobalRootDir, "run/mnt/kernel-snaps/kernel/33") + snapdir := filepath.Join(dirs.SnapMountDir, "kernel/33") fwdir := filepath.Join(snapdir, "firmware") c.Assert(os.MkdirAll(fwdir, 0755), IsNil) c.Assert(os.WriteFile(filepath.Join(fwdir, "bar.bin"), []byte{}, 0644), IsNil) @@ -587,20 +621,12 @@ err := s.be.SetupKernelSnap("kernel", snap.R(33), progress.Null) c.Assert(err, IsNil) - // ensure the right unit is created - what := filepath.Join(dirs.GlobalRootDir, "var/lib/snapd/snaps/kernel_33.snap") - where := "/run/mnt/kernel-snaps/kernel/33" - mup := systemd.MountUnitPath(where) - c.Assert(mup, testutil.FileMatches, fmt.Sprintf("(?ms).*^Where=%s", where)) - c.Assert(mup, testutil.FileMatches, fmt.Sprintf("(?ms).*^What=%s", what)) - - // And the kernel files + // Kernel files are created treedir := filepath.Join(dirs.SnapdStateDir(dirs.GlobalRootDir), "kernel/kernel/33") c.Assert(osutil.FileExists(filepath.Join(treedir, "lib/firmware/bar.bin")), Equals, true) // Now test cleaning-up s.be.RemoveKernelSnapSetup("kernel", snap.R(33), progress.Null) - c.Assert(osutil.FileExists(mup), Equals, false) c.Assert(osutil.FileExists(filepath.Join(treedir, "lib/firmware/bar.bin")), Equals, false) } @@ -613,7 +639,7 @@ defer 
os.Unsetenv("SNAPPY_SQUASHFS_UNPACK_FOR_TESTS") // File from the early-mounted snap - snapdir := filepath.Join(dirs.GlobalRootDir, "run/mnt/kernel-snaps/kernel/33") + snapdir := filepath.Join(dirs.SnapMountDir, "kernel/33") fwdir := filepath.Join(snapdir, "firmware") c.Assert(os.MkdirAll(fwdir, 0755), IsNil) // Force failure via unexpected file type @@ -628,3 +654,181 @@ c.Assert(osutil.FileExists(mup), Equals, false) c.Assert(osutil.FileExists(treedir), Equals, false) } + +func createKModsComps(c *C, idx, num int, ksnap string, kernRev snap.Revision) []*snap.ComponentSideInfo { + comps := make([]*snap.ComponentSideInfo, num) + for i := range comps { + idxStr := fmt.Sprintf("%d", idx+i) + compName := "comp" + idxStr + compRev := snap.R((idx+i)*10 + idx) + compDir := filepath.Join(dirs.SnapMountDir, + ksnap, "components", "mnt", compName, compRev.String()) + modsDir := filepath.Join(compDir, "modules/6.5.4-3-generic") + c.Assert(os.MkdirAll(modsDir, 0755), IsNil) + c.Assert(os.WriteFile(filepath.Join(modsDir, "foo.ko"), + []byte{}, 0644), IsNil) + + // Link that marks it as active + snapCompForRevDir := filepath.Join(dirs.SnapMountDir, + ksnap, "components", kernRev.String()) + c.Assert(os.MkdirAll(snapCompForRevDir, 0755), IsNil) + linkPath := filepath.Join(snapCompForRevDir, compName) + // Might have a link for a previous component revision + err := os.Remove(linkPath) + if err != nil && !os.IsNotExist(err) { + c.Error(err) + } + c.Assert(os.Symlink(compDir, linkPath), IsNil) + + comps[i] = snap.NewComponentSideInfo( + naming.NewComponentRef(ksnap, compName), compRev) + } + return comps +} + +func (s *setupSuite) TestSetupAndRemoveKernelModulesComponents(c *C) { + ksnap := "kernel" + kernRev := snap.R(33) + toInstall := createKModsComps(c, 1, 2, ksnap, kernRev) + + depmod := testutil.MockCommand(c, "depmod", "") + defer depmod.Restore() + + // Set-up and then remove + s.testSetupKernelModulesComponents(c, toInstall, nil, ksnap, kernRev, "") + s.testRemoveKernelModulesComponents(c, toInstall, nil, ksnap, kernRev, "") +} + +func (s *setupSuite) TestSetupAndRemoveKernelModulesComponentsWithInstalled(c *C) { + ksnap := "kernel" + kernRev := snap.R(33) + + depmod := testutil.MockCommand(c, "depmod", "") + defer depmod.Restore() + + // Set-up + firstInstalled := createKModsComps(c, 1, 2, ksnap, kernRev) + s.testSetupKernelModulesComponents(c, firstInstalled, nil, ksnap, kernRev, "") + // Add components, with some overlap (comp2/3 - new rev for comp2 though, 22) + newComps := createKModsComps(c, 2, 2, ksnap, kernRev) + s.testSetupKernelModulesComponents(c, newComps, firstInstalled, ksnap, kernRev, "") + // twice to check it is idempotent + s.testSetupKernelModulesComponents(c, newComps, firstInstalled, ksnap, kernRev, "") + + // comp1 still there + checkInstalled(c, []*snap.ComponentSideInfo{firstInstalled[0]}, ksnap, kernRev) + // comp1 rev 21 removed + checkRemoved(c, []*snap.ComponentSideInfo{firstInstalled[1]}, ksnap, kernRev) + + // restore to the previous situation + s.testRemoveKernelModulesComponents(c, newComps, firstInstalled, ksnap, kernRev, "") + // twice to check it is idempotent + s.testRemoveKernelModulesComponents(c, newComps, firstInstalled, ksnap, kernRev, "") +} + +func (s *setupSuite) testSetupKernelModulesComponents(c *C, toInstall, installed []*snap.ComponentSideInfo, ksnap string, kernRev snap.Revision, errRegex string) { + bloader := bootloadertest.Mock("mock", c.MkDir()) + bootloader.Force(bloader) + + // Files from the kernel snap + revStr := kernRev.String() + snapdir 
:= filepath.Join(dirs.SnapMountDir, ksnap, revStr) + fwdir := filepath.Join(snapdir, "firmware") + c.Assert(os.MkdirAll(fwdir, 0755), IsNil) + modsdir := filepath.Join(snapdir, "modules/6.5.4-3-generic") + c.Assert(os.MkdirAll(modsdir, 0755), IsNil) + + // Run kernel set-up + err := s.be.SetupKernelSnap(ksnap, kernRev, progress.Null) + c.Assert(err, IsNil) + + // Run modules set-up + err = s.be.SetupKernelModulesComponents(toInstall, installed, ksnap, kernRev, progress.Null) + if errRegex == "" { + c.Assert(err, IsNil) + // ensure new units and files are around + checkInstalled(c, toInstall, ksnap, kernRev) + } else { + c.Assert(err, ErrorMatches, errRegex) + // Old units are still there + checkInstalled(c, installed, ksnap, kernRev) + // New units have been cleaned up + checkRemoved(c, toInstall, ksnap, kernRev) + } +} + +func checkInstalled(c *C, installed []*snap.ComponentSideInfo, ksnap string, kernRev snap.Revision) { + for _, csi := range installed { + treedir := filepath.Join(dirs.SnapdStateDir(dirs.GlobalRootDir), + "kernel", ksnap, kernRev.String(), + "/lib/modules/6.5.4-3-generic/updates", + csi.Component.ComponentName) + dest, err := os.Readlink(treedir) + c.Assert(err, IsNil) + expected := filepath.Join(dirs.SnapMountDir, + ksnap, "components", "mnt", csi.Component.ComponentName, + csi.Revision.String(), "modules/6.5.4-3-generic") + c.Assert(dest, Equals, expected) + + c.Assert(osutil.FileExists(filepath.Join(treedir, "foo.ko")), Equals, true) + } +} + +func checkRemoved(c *C, removed []*snap.ComponentSideInfo, ksnap string, kernRev snap.Revision) { + for _, csi := range removed { + treedir := filepath.Join(dirs.SnapdStateDir(dirs.GlobalRootDir), + "kernel", ksnap, kernRev.String(), + "lib/modules/6.5.4-3-generic/updates", + csi.Component.ComponentName) + dest, err := os.Readlink(treedir) + if err == nil { + // If there is a link it should not point to the revision + // for this csi + revLink := filepath.Join(dirs.SnapMountDir, + ksnap, "components", kernRev.String(), + csi.Component.ComponentName, "modules/6.5.4-3-generic") + c.Assert(dest == revLink, Equals, false) + } + } +} + +func (s *setupSuite) testRemoveKernelModulesComponents(c *C, toRemove, finalComps []*snap.ComponentSideInfo, ksnap string, kernRev snap.Revision, errRegex string) { + err := s.be.RemoveKernelModulesComponentsSetup(toRemove, finalComps, ksnap, kernRev, progress.Null) + if err == nil { + // No left-overs + checkRemoved(c, toRemove, ksnap, kernRev) + // finalComps are installed + checkInstalled(c, finalComps, ksnap, kernRev) + } else { + c.Assert(err, ErrorMatches, errRegex) + // Not removed + checkInstalled(c, toRemove, ksnap, kernRev) + } +} + +func (s *setupSuite) TestRemoveKernelModulesComponentsFails(c *C) { + ksnap := "kernel" + kernRev := snap.R(33) + + depmod := testutil.MockCommand(c, "depmod", "") + defer depmod.Restore() + + r := systemd.MockSystemctl(func(cmd ...string) ([]byte, error) { + // Fail in the penultimate invocation, which disables the unit + // for comp3, rev. 32. 
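+ // (createKModsComps(c, 2, 2, ...) names components "comp<idx+i>" with
+ // revision (idx+i)*10+idx, so this second batch is comp2 rev 22 and
+ // comp3 rev 32, which is what the "comp3-32" match below relies on.)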
+ if len(cmd) == 3 && cmd[1] == "disable" && strings.Contains(cmd[2], "comp3-32") { + return nil, errors.New("cannot disable comp3-32") + } + return []byte("ActiveState=inactive\n"), nil + }) + defer r() + + // Set-up + firstInstalled := createKModsComps(c, 1, 2, ksnap, kernRev) + s.testSetupKernelModulesComponents(c, firstInstalled, nil, ksnap, kernRev, "") + newComps := createKModsComps(c, 2, 2, ksnap, kernRev) + s.testSetupKernelModulesComponents(c, newComps, firstInstalled, ksnap, kernRev, "") + // Restore to the previous state, but fail + s.testRemoveKernelModulesComponents(c, newComps, firstInstalled, ksnap, kernRev, + "cannot remove mount in .*: cannot disable comp3-32") +} diff -Nru snapd-2.62+23.10/overlord/snapstate/backend.go snapd-2.63+23.10/overlord/snapstate/backend.go --- snapd-2.62+23.10/overlord/snapstate/backend.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/backend.go 2024-04-24 00:00:39.000000000 +0000 @@ -75,9 +75,11 @@ SetupSnap(snapFilePath, instanceName string, si *snap.SideInfo, dev snap.Device, opts *backend.SetupSnapOptions, meter progress.Meter) (snap.Type, *backend.InstallRecord, error) SetupKernelSnap(instanceName string, rev snap.Revision, meter progress.Meter) (err error) SetupComponent(compFilePath string, compPi snap.ContainerPlaceInfo, dev snap.Device, meter progress.Meter) (installRecord *backend.InstallRecord, err error) + SetupKernelModulesComponents(compsToInstall, currentComps []*snap.ComponentSideInfo, ksnapName string, ksnapRev snap.Revision, meter progress.Meter) (err error) CopySnapData(newSnap, oldSnap *snap.Info, opts *dirs.SnapDirOptions, meter progress.Meter) error SetupSnapSaveData(info *snap.Info, dev snap.Device, meter progress.Meter) error LinkSnap(info *snap.Info, dev snap.Device, linkCtx backend.LinkContext, tm timings.Measurer) (rebootInfo boot.RebootInfo, err error) + LinkComponent(cpi snap.ContainerPlaceInfo, snapRev snap.Revision) error StartServices(svcs []*snap.AppInfo, disabledSvcs []string, meter progress.Meter, tm timings.Measurer) error StopServices(svcs []*snap.AppInfo, reason snap.ServiceStopReason, meter progress.Meter, tm timings.Measurer) error QueryDisabledServices(info *snap.Info, pb progress.Meter) ([]string, error) @@ -92,6 +94,7 @@ // remove related UnlinkSnap(info *snap.Info, linkCtx backend.LinkContext, meter progress.Meter) error + UnlinkComponent(cpi snap.ContainerPlaceInfo, snapRev snap.Revision) error RemoveSnapFiles(s snap.PlaceInfo, typ snap.Type, installRecord *backend.InstallRecord, dev snap.Device, meter progress.Meter) error RemoveSnapDir(s snap.PlaceInfo, hasOtherInstances bool) error RemoveSnapData(info *snap.Info, opts *dirs.SnapDirOptions) error @@ -104,6 +107,7 @@ RemoveSnapInhibitLock(snapName string) error RemoveAllSnapAppArmorProfiles() error RemoveKernelSnapSetup(instanceName string, rev snap.Revision, meter progress.Meter) error + RemoveKernelModulesComponentsSetup(compsToRemove, finalComps []*snap.ComponentSideInfo, ksnapName string, ksnapRev snap.Revision, meter progress.Meter) (err error) // alias related UpdateAliases(add []*backend.Alias, remove []*backend.Alias) error diff -Nru snapd-2.62+23.10/overlord/snapstate/backend_test.go snapd-2.63+23.10/overlord/snapstate/backend_test.go --- snapd-2.62+23.10/overlord/snapstate/backend_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/backend_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -85,6 +85,12 @@ dirOpts *dirs.SnapDirOptions undoInfo *backend.UndoInfo + + compsToInstall, 
currentComps []*snap.ComponentSideInfo + compsToRemove, finalComps []*snap.ComponentSideInfo + + containerName string + containerFileName string } type fakeOps []fakeOp @@ -910,9 +916,9 @@ } func (f *fakeSnappyBackend) SetupKernelSnap(instanceName string, rev snap.Revision, meter progress.Meter) (err error) { - meter.Notify("setup-kernel-snap") + meter.Notify("prepare-kernel-snap") f.appendOp(&fakeOp{ - op: "setup-kernel-snap", + op: "prepare-kernel-snap", }) return nil } @@ -928,7 +934,9 @@ func (f *fakeSnappyBackend) SetupComponent(compFilePath string, compPi snap.ContainerPlaceInfo, dev snap.Device, meter progress.Meter) (installRecord *backend.InstallRecord, err error) { meter.Notify("setup-component") f.appendOp(&fakeOp{ - op: "setup-component", + op: "setup-component", + containerName: compPi.ContainerName(), + containerFileName: compPi.Filename(), }) if strings.HasSuffix(compPi.ContainerName(), "+broken") { return nil, fmt.Errorf("cannot set-up component %q", compPi.ContainerName()) @@ -936,10 +944,38 @@ return &backend.InstallRecord{}, nil } +func (f *fakeSnappyBackend) SetupKernelModulesComponents(compsToInstall, currentComps []*snap.ComponentSideInfo, ksnapName string, ksnapRev snap.Revision, meter progress.Meter) (err error) { + meter.Notify("setup-kernel-modules-components") + f.appendOp(&fakeOp{ + op: "setup-kernel-modules-components", + compsToInstall: compsToInstall, + currentComps: currentComps, + }) + if strings.HasSuffix(ksnapName, "+broken") { + return fmt.Errorf("cannot set-up kernel-modules for %s", ksnapName) + } + return nil +} + +func (f *fakeSnappyBackend) RemoveKernelModulesComponentsSetup(compsToRemove, finalComps []*snap.ComponentSideInfo, ksnapName string, ksnapRev snap.Revision, meter progress.Meter) (err error) { + meter.Notify("remove-kernel-modules-components-setup") + f.appendOp(&fakeOp{ + op: "remove-kernel-modules-components-setup", + compsToRemove: compsToRemove, + finalComps: finalComps, + }) + if strings.HasSuffix(ksnapName, "+reverterr") { + return fmt.Errorf("cannot remove set-up of kernel-modules for %s", ksnapName) + } + return nil +} + func (f *fakeSnappyBackend) UndoSetupComponent(cpi snap.ContainerPlaceInfo, installRecord *backend.InstallRecord, dev snap.Device, meter progress.Meter) error { meter.Notify("undo-setup-component") f.appendOp(&fakeOp{ - op: "undo-setup-component", + op: "undo-setup-component", + containerName: cpi.ContainerName(), + containerFileName: cpi.Filename(), }) if strings.HasSuffix(cpi.ContainerName(), "+brokenundo") { return fmt.Errorf("cannot undo set-up of component %q", cpi.ContainerName()) @@ -949,7 +985,9 @@ func (f *fakeSnappyBackend) RemoveComponentDir(cpi snap.ContainerPlaceInfo) error { f.appendOp(&fakeOp{ - op: "remove-component-dir", + op: "remove-component-dir", + containerName: cpi.ContainerName(), + containerFileName: cpi.Filename(), }) return nil } @@ -1136,6 +1174,14 @@ return boot.RebootInfo{RebootRequired: reboot}, nil } +func (f *fakeSnappyBackend) LinkComponent(cpi snap.ContainerPlaceInfo, snapRev snap.Revision) error { + f.appendOp(&fakeOp{ + op: "link-component", + path: cpi.MountDir(), + }) + return f.maybeErrForLastOp() +} + func svcSnapMountDir(svcs []*snap.AppInfo) string { if len(svcs) == 0 { return "" @@ -1247,6 +1293,14 @@ }) return f.maybeErrForLastOp() } + +func (f *fakeSnappyBackend) UnlinkComponent(cpi snap.ContainerPlaceInfo, snapRev snap.Revision) error { + f.appendOp(&fakeOp{ + op: "unlink-component", + path: cpi.MountDir(), + }) + return f.maybeErrForLastOp() +} func (f 
*fakeSnappyBackend) RemoveSnapFiles(s snap.PlaceInfo, typ snap.Type, installRecord *backend.InstallRecord, dev snap.Device, meter progress.Meter) error { meter.Notify("remove-snap-files") diff -Nru snapd-2.62+23.10/overlord/snapstate/component.go snapd-2.63+23.10/overlord/snapstate/component.go --- snapd-2.62+23.10/overlord/snapstate/component.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/component.go 2024-04-24 00:00:39.000000000 +0000 @@ -170,6 +170,13 @@ // TODO hooks for components + if compSetup.CompType == snap.KernelModulesComponent { + kmodSetup := st.NewTask("prepare-kernel-modules-components", + fmt.Sprintf(i18n.G("Prepare kernel-modules component %q%s"), + compSi.Component, revisionStr)) + addTask(kmodSetup) + } + // We might be replacing a component if a local install, otherwise // this is not really possible. compInstalled := snapst.IsComponentInCurrentSeq(compSi.Component) @@ -186,6 +193,14 @@ compSi.Component, revisionStr)) addTask(linkSnap) + // clean-up previous revision of the component if present + if compInstalled { + discardComp := st.NewTask("discard-component", fmt.Sprintf(i18n.G( + "Discard previous revision for component %q"), + compSi.Component)) + addTask(discardComp) + } + installSet := state.NewTaskSet(tasks...) installSet.MarkEdge(prepare, BeginEdge) installSet.MarkEdge(linkSnap, MaybeRebootEdge) diff -Nru snapd-2.62+23.10/overlord/snapstate/component_install_test.go snapd-2.63+23.10/overlord/snapstate/component_install_test.go --- snapd-2.62+23.10/overlord/snapstate/component_install_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/component_install_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -41,6 +41,8 @@ compOptRevisionPresent // Component revision is used by the currently active snap revision compOptIsActive + // Component is of kernel-modules type + compTypeIsKernMods ) // opts is a bitset with compOpt* as possible values. 
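As a quick reference for how these option bits translate into the expected task chain, here is a minimal standalone Go sketch. The bit values and the leading "prepare-component" entry are assumptions made for illustration; only the conditional tasks mirror the helper patched in the next hunk.

package main

import "fmt"

// Illustrative option bits; only the names come from the test suite.
const (
	compOptIsLocal = 1 << iota
	compOptRevisionPresent
	compOptIsActive
	compTypeIsKernMods
)

// expectedTasks mirrors the conditional logic of expectedComponentInstallTasks:
// kernel-modules components gain a prepare-kernel-modules-components task, and
// replacing an active component adds unlink-current-component plus a final
// discard-component.
func expectedTasks(opts int) []string {
	tasks := []string{"prepare-component"} // assumed initial task
	if opts&compOptRevisionPresent == 0 {
		tasks = append(tasks, "mount-component")
	}
	if opts&compTypeIsKernMods != 0 {
		tasks = append(tasks, "prepare-kernel-modules-components")
	}
	if opts&compOptIsActive != 0 {
		tasks = append(tasks, "unlink-current-component")
	}
	tasks = append(tasks, "link-component")
	if opts&compOptIsActive != 0 {
		tasks = append(tasks, "discard-component")
	}
	return tasks
}

func main() {
	// The combination used by TestInstallKernelModulesComponentPath: a local,
	// not yet installed kernel-modules component.
	fmt.Println(expectedTasks(compOptIsLocal | compTypeIsKernMods))
}

For an already active component the same call would additionally include unlink-current-component and a trailing discard-component.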
@@ -56,12 +58,18 @@ if opts&compOptRevisionPresent == 0 { startTasks = append(startTasks, "mount-component") } + if opts&compTypeIsKernMods != 0 { + startTasks = append(startTasks, "prepare-kernel-modules-components") + } // Component is installed (implicit if compOptRevisionPresent is set) if opts&compOptIsActive != 0 { startTasks = append(startTasks, "unlink-current-component") } // link-component is always present startTasks = append(startTasks, "link-component") + if opts&compOptIsActive != 0 { + startTasks = append(startTasks, "discard-component") + } return startTasks } @@ -104,10 +112,14 @@ } func createTestComponent(c *C, snapName, compName string) (*snap.ComponentInfo, string) { + return createTestComponentWithType(c, snapName, compName, "test") +} + +func createTestComponentWithType(c *C, snapName, compName string, typ string) (*snap.ComponentInfo, string) { componentYaml := fmt.Sprintf(`component: %s+%s -type: test +type: %s version: 1.0 -`, snapName, compName) +`, snapName, compName, typ) compPath := snaptest.MakeTestComponent(c, componentYaml) compf, err := snapfile.Open(compPath) c.Assert(err, IsNil) @@ -119,13 +131,17 @@ } func createTestSnapInfoForComponent(c *C, snapName string, snapRev snap.Revision, compName string) *snap.Info { + return createTestSnapInfoForComponentWithType(c, snapName, snapRev, compName, "test") +} + +func createTestSnapInfoForComponentWithType(c *C, snapName string, snapRev snap.Revision, compName, typ string) *snap.Info { snapYaml := fmt.Sprintf(`name: %s type: app version: 1.1 components: %s: - type: test -`, snapName, compName) + type: %s +`, snapName, compName, typ) info, err := snap.InfoFromSnapYaml([]byte(snapYaml)) c.Assert(err, IsNil) info.SideInfo = snap.SideInfo{RealName: snapName, Revision: snapRev} @@ -484,3 +500,27 @@ c.Assert(err.Error(), Equals, `snap "some-snap" has "update" change in progress`) } + +func (s *snapmgrTestSuite) TestInstallKernelModulesComponentPath(c *C) { + const snapName = "mysnap" + const compName = "mycomp" + snapRev := snap.R(1) + _, compPath := createTestComponentWithType(c, snapName, compName, "kernel-modules") + info := createTestSnapInfoForComponentWithType(c, snapName, snapRev, compName, "kernel-modules") + + s.state.Lock() + defer s.state.Unlock() + + setStateWithOneSnap(s.state, snapName, snapRev) + + csi := snap.NewComponentSideInfo(naming.ComponentRef{ + SnapName: snapName, ComponentName: compName}, snap.R(33)) + ts, err := snapstate.InstallComponentPath(s.state, csi, info, compPath, + snapstate.Flags{}) + c.Assert(err, IsNil) + + verifyComponentInstallTasks(c, compOptIsLocal|compTypeIsKernMods, ts) + c.Assert(s.state.TaskCount(), Equals, len(ts.Tasks())) + // File is not deleted + c.Assert(osutil.FileExists(compPath), Equals, true) +} diff -Nru snapd-2.62+23.10/overlord/snapstate/component_test.go snapd-2.63+23.10/overlord/snapstate/component_test.go --- snapd-2.62+23.10/overlord/snapstate/component_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/component_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -31,7 +31,7 @@ . 
"gopkg.in/check.v1" ) -func (s *snapmgrTestSuite) mockComponentInfos(c *C, snapName string, compNames []string) { +func (s *snapmgrTestSuite) mockComponentInfos(c *C, snapName string, compNames []string, compRevs []snap.Revision) { cis := make([]*snap.ComponentInfo, len(compNames)) for i, comp := range compNames { componentYaml := fmt.Sprintf(`component: %s+%s @@ -45,8 +45,8 @@ s.AddCleanup(snapstate.MockReadComponentInfo(func( compMntDir string) (*snap.ComponentInfo, error) { - for _, ci := range cis { - if strings.HasSuffix(compMntDir, "/"+ci.Component.ComponentName) { + for i, ci := range cis { + if strings.HasSuffix(compMntDir, "/"+ci.Component.ComponentName+"/"+compRevs[i].String()) { return ci, nil } } @@ -73,7 +73,8 @@ csi := snap.NewComponentSideInfo(cref, compRev) cref2 := naming.NewComponentRef(snapName, compName2) csi2 := snap.NewComponentSideInfo(cref2, compRev) - s.mockComponentInfos(c, snapName, []string{compName, compName2}) + s.mockComponentInfos(c, snapName, []string{compName, compName2}, + []snap.Revision{compRev, compRev}) snapSt := &snapstate.SnapState{ Active: true, diff -Nru snapd-2.62+23.10/overlord/snapstate/cookies_test.go snapd-2.63+23.10/overlord/snapstate/cookies_test.go --- snapd-2.62+23.10/overlord/snapstate/cookies_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/cookies_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "encoding/base64" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" @@ -108,14 +107,14 @@ cookieFile := filepath.Join(dirs.SnapCookieDir, "snap.some-snap") c.Assert(osutil.FileExists(cookieFile), Equals, true) - data, err := ioutil.ReadFile(cookieFile) + data, err := os.ReadFile(cookieFile) c.Assert(err, IsNil) c.Assert(newCookies[string(data)], NotNil) c.Assert(newCookies[string(data)], Equals, "some-snap") cookieFile = filepath.Join(dirs.SnapCookieDir, "snap.other-snap") c.Assert(osutil.FileExists(cookieFile), Equals, true) - data, err = ioutil.ReadFile(cookieFile) + data, err = os.ReadFile(cookieFile) c.Assert(err, IsNil) c.Assert(newCookies[string(data)], NotNil) c.Assert(newCookies[string(data)], Equals, "other-snap") diff -Nru snapd-2.62+23.10/overlord/snapstate/export_test.go snapd-2.63+23.10/overlord/snapstate/export_test.go --- snapd-2.62+23.10/overlord/snapstate/export_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/export_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -48,6 +48,8 @@ TooSoonError = tooSoonError ) +var ComponentSetupTask = componentSetupTask + const ( None = none Full = full diff -Nru snapd-2.62+23.10/overlord/snapstate/handlers.go snapd-2.63+23.10/overlord/snapstate/handlers.go --- snapd-2.62+23.10/overlord/snapstate/handlers.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/handlers.go 2024-04-24 00:00:39.000000000 +0000 @@ -110,29 +110,35 @@ return &snapsup, nil } -// SetTaskSnapSetup writes the given SnapSetup to the provided task's -// snap-setup-task Task, or to the task itself if the task does not have a -// snap-setup-task (i.e. 
it _is_ the snap-setup-task) -func SetTaskSnapSetup(t *state.Task, snapsup *SnapSetup) error { +func snapSetupTask(t *state.Task) (*state.Task, error) { if t.Has("snap-setup") { - // this is the snap-setup-task so just write to the task directly - t.Set("snap-setup", snapsup) + // this is the snap-setup-task so just return the task directly + return t, nil } else { - // this task isn't the snap-setup-task, so go get that and write to that - // one + // this task isn't the snap-setup-task, so go get that var id string err := t.Get("snap-setup-task", &id) if err != nil { - return err + return nil, err } ts := t.State().Task(id) if ts == nil { - return fmt.Errorf("internal error: tasks are being pruned") + return nil, fmt.Errorf("internal error: tasks are being pruned") } - ts.Set("snap-setup", snapsup) + return ts, nil } +} +// SetTaskSnapSetup writes the given SnapSetup to the provided task's +// snap-setup-task Task, or to the task itself if the task does not have a +// snap-setup-task (i.e. it _is_ the snap-setup-task) +func SetTaskSnapSetup(t *state.Task, snapsup *SnapSetup) error { + ts, err := snapSetupTask(t) + if err != nil { + return err + } + ts.Set("snap-setup", snapsup) return nil } @@ -4885,8 +4891,8 @@ st.Unlock() pm := NewTaskProgressAdapterUnlocked(t) - timings.Run(perfTimings, "setup-kernel-snap", - fmt.Sprintf("setup of kernel snap %q", snapsup.InstanceName()), + timings.Run(perfTimings, "prepare-kernel-snap", + fmt.Sprintf("preparing kernel snap %q", snapsup.InstanceName()), func(timings.Measurer) { err = m.backend.SetupKernelSnap( snapsup.InstanceName(), snapsup.Revision(), pm) @@ -4899,8 +4905,11 @@ perfTimings.Save(st) // Needed so the old drivers tree can be removed later - prevRev := snapSt.Current - t.Change().Set("previous-kernel-rev", prevRev) + setupTask, err := snapSetupTask(t) + if err != nil { + return err + } + setupTask.Set("previous-kernel-rev", snapSt.Current) // Make sure we won't be rerun t.SetStatus(state.DoneStatus) @@ -4955,23 +4964,26 @@ return err } - // This is stored by doSetupKernelSnap - now after the reboot triggered - // after linking the new snap, we can remove the old drivers tree. - var prevRev snap.Revision - err = t.Change().Get("previous-kernel-rev", &prevRev) + // Now after the reboot triggered after linking the new snap, we can + // remove the old drivers tree if this was not the first installation. 
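+ // "previous-kernel-rev" is stored on the snap-setup task by
+ // doSetupKernelSnap and is absent on first installs, which is why
+ // state.ErrNoState is tolerated below.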
+ setupTask, err := snapSetupTask(t) if err != nil { return err } + var prevKernelRev snap.Revision + err = setupTask.Get("previous-kernel-rev", &prevKernelRev) + if err != nil && !errors.Is(err, state.ErrNoState) { + return err + } - // Might be unset on first installation - if !prevRev.Unset() { + if !prevKernelRev.Unset() { st.Unlock() pm := NewTaskProgressAdapterUnlocked(t) - timings.Run(perfTimings, "remove-old-kernel-snap-setup", - fmt.Sprintf("cleanup of previous kernel snap %q", currInfo.InstanceName()), + timings.Run(perfTimings, "discard-old-kernel-snap-setup", + fmt.Sprintf("discard previous kernel snap set-up %q", currInfo.InstanceName()), func(timings.Measurer) { err = m.backend.RemoveKernelSnapSetup( - currInfo.InstanceName(), prevRev, pm) + currInfo.InstanceName(), prevKernelRev, pm) }) st.Lock() if err != nil { @@ -5002,21 +5014,25 @@ return err } - // Now we must re-do the previous revision kernel drivers tree - var prevRev snap.Revision - err = t.Change().Get("previous-kernel-rev", &prevRev) + setupTask, err := snapSetupTask(t) if err != nil { return err } + var prevKernelRev snap.Revision + err = setupTask.Get("previous-kernel-rev", &prevKernelRev) + if err != nil && !errors.Is(err, state.ErrNoState) { + return err + } - if !prevRev.Unset() { + // Now we must re-do the previous revision kernel drivers tree + if !prevKernelRev.Unset() { st.Unlock() pm := NewTaskProgressAdapterUnlocked(t) timings.Run(perfTimings, "undo-remove-old-kernel-snap-setup", fmt.Sprintf("undo cleanup of previous kernel snap %q", currInfo.InstanceName()), func(timings.Measurer) { err = m.backend.SetupKernelSnap( - currInfo.InstanceName(), prevRev, pm) + currInfo.InstanceName(), prevKernelRev, pm) }) st.Lock() if err != nil { diff -Nru snapd-2.62+23.10/overlord/snapstate/handlers_components.go snapd-2.63+23.10/overlord/snapstate/handlers_components.go --- snapd-2.62+23.10/overlord/snapstate/handlers_components.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/handlers_components.go 2024-04-24 00:00:39.000000000 +0000 @@ -80,20 +80,49 @@ return csup, ssup, &snapst, nil } +// componentSetupTask returns the task that contains the ComponentSetup +// identified by the component-setup-task contained by task t, or directly it +// returns t if it contains a ComponentSetup. +func componentSetupTask(t *state.Task) (*state.Task, error) { + if t.Has("component-setup") { + return t, nil + } else { + // this task isn't the component-setup-task, so go get that and + // write to that one + var id string + err := t.Get("component-setup-task", &id) + if err != nil { + return nil, err + } + + ts := t.State().Task(id) + if ts == nil { + return nil, fmt.Errorf("internal error: tasks are being pruned") + } + return ts, nil + } +} + func (m *SnapManager) doPrepareComponent(t *state.Task, _ *tomb.Tomb) error { st := t.State() st.Lock() defer st.Unlock() - compSetup, _, err := TaskComponentSetup(t) + compSetup, _, snapSt, err := compSetupAndState(t) if err != nil { return err } if compSetup.Revision().Unset() { - // This is a local installation, revision is -1 (there - // is no history of local revisions for components). - compSetup.CompSideInfo.Revision = snap.R(-1) + // This is a local installation, revision is -1 if the current + // one is non-local or not installed, or current one + // decremented by one otherwise. 
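+ // For example: nothing installed, or a store revision installed, gives
+ // R(-1); an installed local R(-1) gives R(-2), R(-2) gives R(-3), etc.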
+ revision := snap.R(-1) + current := snapSt.CurrentComponentSideInfo(compSetup.CompSideInfo.Component) + if current != nil && current.Revision.N < 0 { + revision = snap.R(current.Revision.N - 1) + } + compSetup.CompSideInfo.Revision = revision } t.Set("component-setup", compSetup) @@ -122,7 +151,7 @@ csi := compSetup.CompSideInfo cpi := snap.MinimalComponentContainerPlaceInfo(compSetup.ComponentName(), - csi.Revision, snapsup.InstanceName(), snapsup.Revision()) + csi.Revision, snapsup.InstanceName()) defer func() { st.Lock() @@ -211,6 +240,11 @@ return err } + return m.undoSetupComponent(t, compSetup.CompSideInfo, snapsup.InstanceName()) +} + +func (m *SnapManager) undoSetupComponent(t *state.Task, csi *snap.ComponentSideInfo, instanceName string) error { + st := t.State() st.Lock() deviceCtx, err := DeviceCtx(st, t, nil) st.Unlock() @@ -227,18 +261,14 @@ return err } - csi := compSetup.CompSideInfo - cpi := snap.MinimalComponentContainerPlaceInfo(compSetup.ComponentName(), - csi.Revision, snapsup.InstanceName(), snapsup.Revision()) + cpi := snap.MinimalComponentContainerPlaceInfo(csi.Component.ComponentName, + csi.Revision, instanceName) pm := NewTaskProgressAdapterUnlocked(t) if err := m.backend.UndoSetupComponent(cpi, &installRecord, deviceCtx, pm); err != nil { return err } - st.Lock() - defer st.Unlock() - return m.backend.RemoveComponentDir(cpi) } @@ -280,6 +310,14 @@ } snapSt.LastCompRefreshTime[compSetup.ComponentName()] = timeNow() + // Create the symlink + csi := cs.SideInfo + cpi := snap.MinimalComponentContainerPlaceInfo(csi.Component.ComponentName, + csi.Revision, snapInfo.InstanceName()) + if err := m.backend.LinkComponent(cpi, snapInfo.Revision); err != nil { + return err + } + // Finally, write the state Set(st, snapsup.InstanceName(), snapSt) // Make sure we won't be rerun @@ -314,6 +352,15 @@ // Restore old state // relinking of the old component is done in the undo of unlink-current-snap + + // Remove the symlink + csi := linkedComp.SideInfo + cpi := snap.MinimalComponentContainerPlaceInfo(csi.Component.ComponentName, + csi.Revision, snapInfo.InstanceName()) + if err := m.backend.UnlinkComponent(cpi, snapInfo.Revision); err != nil { + return err + } + snapSt.Sequence.RemoveComponentForRevision(snapInfo.Revision, linkedComp.SideInfo.Component) @@ -350,8 +397,21 @@ return fmt.Errorf("internal error while unlinking: %s expected but not found", cref) } - // set information for undoUnlinkCurrentComponent in the task - t.Set("unlinked-component", unlinkedComp) + // Remove symlink + csi := unlinkedComp.SideInfo + cpi := snap.MinimalComponentContainerPlaceInfo(csi.Component.ComponentName, + csi.Revision, snapInfo.InstanceName()) + if err := m.backend.UnlinkComponent(cpi, snapInfo.Revision); err != nil { + return err + } + + // set information for undoUnlinkCurrentComponent/doDiscardComponent in + // the setup task + setupTask, err := componentSetupTask(t) + if err != nil { + return err + } + setupTask.Set("unlinked-component", *unlinkedComp) // Finally, write the state Set(st, snapsup.InstanceName(), snapSt) @@ -379,16 +439,28 @@ return err } - var unlinkedComp sequence.ComponentState - err = t.Get("unlinked-component", &unlinkedComp) + setupTask, err := componentSetupTask(t) if err != nil { return err } + var unlinkedComp sequence.ComponentState + if err := setupTask.Get("unlinked-component", &unlinkedComp); err != nil { + return fmt.Errorf("internal error: no unlinked component: err") + } - if err := snapSt.Sequence.AddComponentForRevision(snapInfo.Revision, &unlinkedComp); 
err != nil { + if err := snapSt.Sequence.AddComponentForRevision( + snapInfo.Revision, &unlinkedComp); err != nil { return fmt.Errorf("internal error while undo unlink component: %w", err) } + // Re-create the symlink + csi := unlinkedComp.SideInfo + cpi := snap.MinimalComponentContainerPlaceInfo(csi.Component.ComponentName, + csi.Revision, snapInfo.InstanceName()) + if err := m.backend.LinkComponent(cpi, snapInfo.Revision); err != nil { + return err + } + // Finally, write the state Set(st, snapsup.InstanceName(), snapSt) // Make sure we won't be rerun @@ -396,3 +468,103 @@ return nil } + +func (m *SnapManager) doSetupKernelModules(t *state.Task, _ *tomb.Tomb) error { + // invariant: component not linked yet + st := t.State() + + // snapSt is a copy of the current state + st.Lock() + compSetup, snapsup, snapSt, err := compSetupAndState(t) + st.Unlock() + if err != nil { + return err + } + + // kernel-modules components already in the system + kmodComps := snapSt.Sequence.ComponentsWithTypeForRev(snapsup.Revision(), snap.KernelModulesComponent) + + // Set-up the new kernel modules component - called with unlocked state + // as it can take a couple of seconds. + pm := NewTaskProgressAdapterUnlocked(t) + err = m.backend.SetupKernelModulesComponents( + []*snap.ComponentSideInfo{compSetup.CompSideInfo}, + kmodComps, snapsup.InstanceName(), snapsup.Revision(), pm) + if err != nil { + return err + } + + // Make sure we won't be rerun + st.Lock() + defer st.Unlock() + t.SetStatus(state.DoneStatus) + return nil +} + +func (m *SnapManager) doRemoveKernelModulesSetup(t *state.Task, _ *tomb.Tomb) error { + // invariant: component unlinked on undo + st := t.State() + + // snapSt is a copy of the current state + st.Lock() + compSetup, snapsup, snapSt, err := compSetupAndState(t) + st.Unlock() + if err != nil { + return err + } + + // current kernel-modules components in the system + st.Lock() + kmodComps := snapSt.Sequence.ComponentsWithTypeForRev(snapsup.Revision(), snap.KernelModulesComponent) + st.Unlock() + + // Restore kernel modules components state - called with unlocked state + // as it can take a couple of seconds. 
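+ // (Undo path of prepare-kernel-modules-components: the backed-out
+ // component goes in as compsToRemove, while kmodComps, read from the
+ // sequence above, is the final desired state.)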
+ pm := NewTaskProgressAdapterUnlocked(t) + // Component from compSetup has already been unlinked, so it is not in kmodComps + err = m.backend.RemoveKernelModulesComponentsSetup( + []*snap.ComponentSideInfo{compSetup.CompSideInfo}, + kmodComps, snapsup.InstanceName(), snapsup.Revision(), pm) + if err != nil { + return err + } + + // Make sure we won't be rerun + st.Lock() + defer st.Unlock() + t.SetStatus(state.UndoneStatus) + return nil +} + +func infoForCompUndo(t *state.Task) (*snap.ComponentSideInfo, string, error) { + st := t.State() + st.Lock() + defer st.Unlock() + + _, snapsup, err := TaskComponentSetup(t) + if err != nil { + return nil, "", err + } + + setupTask, err := componentSetupTask(t) + if err != nil { + return nil, "", err + } + var unlinkedComp sequence.ComponentState + err = setupTask.Get("unlinked-component", &unlinkedComp) + if err != nil { + return nil, "", fmt.Errorf("internal error: no component to discard: %w", err) + } + + return unlinkedComp.SideInfo, snapsup.InstanceName(), nil +} + +func (m *SnapManager) doDiscardComponent(t *state.Task, _ *tomb.Tomb) error { + compSideInfo, instanceName, err := infoForCompUndo(t) + if err != nil { + return err + } + + // Discard the previously unlinked component + return m.undoSetupComponent(t, compSideInfo, instanceName) +} diff -Nru snapd-2.62+23.10/overlord/snapstate/handlers_components_discard_test.go snapd-2.63+23.10/overlord/snapstate/handlers_components_discard_test.go --- snapd-2.62+23.10/overlord/snapstate/handlers_components_discard_test.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/handlers_components_discard_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,130 @@ +// -*- Mode: Go; indent-tabs-mode: t -*- + +/* + * Copyright (C) 2024 Canonical Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package snapstate_test + +import ( + "github.com/snapcore/snapd/overlord/snapstate" + "github.com/snapcore/snapd/overlord/snapstate/sequence" + "github.com/snapcore/snapd/overlord/snapstate/snapstatetest" + "github.com/snapcore/snapd/snap" + "github.com/snapcore/snapd/snap/naming" + . 
"gopkg.in/check.v1" +) + +type discardCompSnapSuite struct { + baseHandlerSuite +} + +var _ = Suite(&discardCompSnapSuite{}) + +func (s *discardCompSnapSuite) SetUpTest(c *C) { + s.baseHandlerSuite.SetUpTest(c) + s.AddCleanup(snapstatetest.MockDeviceModel(DefaultModel())) +} + +func (s *discardCompSnapSuite) TestDoDiscardComponent(c *C) { + const snapName = "mysnap" + const compName = "mycomp" + snapRev := snap.R(1) + compRev := snap.R(7) + ci, compPath := createTestComponent(c, snapName, compName) + si := createTestSnapInfoForComponent(c, snapName, snapRev, compName) + ssu := createTestSnapSetup(si, snapstate.Flags{}) + s.AddCleanup(snapstate.MockReadComponentInfo(func( + compMntDir string) (*snap.ComponentInfo, error) { + return ci, nil + })) + + s.state.Lock() + + t := s.state.NewTask("discard-component", "task desc") + cref := naming.NewComponentRef(snapName, compName) + csi := snap.NewComponentSideInfo(cref, compRev) + compDiscardRev := snap.R(5) + csiToDiscard := snap.NewComponentSideInfo(cref, compDiscardRev) + csToDiscard := sequence.NewComponentState(csiToDiscard, snap.TestComponent) + compsup := snapstate.NewComponentSetup(csi, snap.TestComponent, compPath) + t.Set("component-setup", compsup) + t.Set("unlinked-component", *csToDiscard) + t.Set("snap-setup", ssu) + chg := s.state.NewChange("test change", "change desc") + chg.AddTask(t) + + s.state.Unlock() + + s.se.Ensure() + s.se.Wait() + + s.state.Lock() + c.Check(chg.Err(), IsNil) + s.state.Unlock() + + // Ensure backend calls have happened with the expected data + c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ + { + op: "undo-setup-component", + containerName: "mysnap+mycomp", + containerFileName: "mysnap+mycomp_5.comp", + }, + { + op: "remove-component-dir", + containerName: "mysnap+mycomp", + containerFileName: "mysnap+mycomp_5.comp", + }, + }) +} + +func (s *discardCompSnapSuite) TestDoDiscardComponentNoUnlinkedComp(c *C) { + const snapName = "mysnap" + const compName = "mycomp" + snapRev := snap.R(1) + compRev := snap.R(7) + ci, compPath := createTestComponent(c, snapName, compName) + si := createTestSnapInfoForComponent(c, snapName, snapRev, compName) + ssu := createTestSnapSetup(si, snapstate.Flags{}) + s.AddCleanup(snapstate.MockReadComponentInfo(func( + compMntDir string) (*snap.ComponentInfo, error) { + return ci, nil + })) + + s.state.Lock() + + t := s.state.NewTask("discard-component", "task desc") + cref := naming.NewComponentRef(snapName, compName) + csi := snap.NewComponentSideInfo(cref, compRev) + // No unlinked component in the task + compsup := snapstate.NewComponentSetup(csi, snap.TestComponent, compPath) + t.Set("component-setup", compsup) + t.Set("snap-setup", ssu) + chg := s.state.NewChange("test change", "change desc") + chg.AddTask(t) + + s.state.Unlock() + + s.se.Ensure() + s.se.Wait() + + s.state.Lock() + c.Check(chg.Err().Error(), Equals, "cannot perform the following tasks:\n"+ + "- task desc (internal error: no component to discard: no state entry for key \"unlinked-component\")") + s.state.Unlock() + + c.Check(s.fakeBackend.ops, IsNil) +} diff -Nru snapd-2.62+23.10/overlord/snapstate/handlers_components_kernel_test.go snapd-2.63+23.10/overlord/snapstate/handlers_components_kernel_test.go --- snapd-2.62+23.10/overlord/snapstate/handlers_components_kernel_test.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/handlers_components_kernel_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,179 @@ +// -*- Mode: Go; indent-tabs-mode: t -*- + +/* + * Copyright (C) 2024 
Canonical Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package snapstate_test + +import ( + . "gopkg.in/check.v1" + + "github.com/snapcore/snapd/overlord/snapstate" + "github.com/snapcore/snapd/overlord/snapstate/sequence" + "github.com/snapcore/snapd/overlord/snapstate/snapstatetest" + "github.com/snapcore/snapd/overlord/state" + "github.com/snapcore/snapd/snap" + "github.com/snapcore/snapd/snap/naming" +) + +type setupKernelComponentsSuite struct { + baseHandlerSuite +} + +var _ = Suite(&setupKernelComponentsSuite{}) + +func (s *setupKernelComponentsSuite) SetUpTest(c *C) { + s.baseHandlerSuite.SetUpTest(c) + s.AddCleanup(snapstatetest.MockDeviceModel(DefaultModel())) +} + +func (s *setupKernelComponentsSuite) TestSetupKernelModules(c *C) { + s.testSetupKernelModules(c, "mykernel", "") +} + +func (s *setupKernelComponentsSuite) TestSetupKernelModulesFails(c *C) { + s.testSetupKernelModules(c, "mykernel+broken", + "cannot perform the following tasks:\n- test kernel modules (cannot set-up kernel-modules for mykernel+broken)") +} + +func (s *setupKernelComponentsSuite) testSetupKernelModules(c *C, snapName, errStr string) { + snapRev := snap.R(77) + const compName = "kcomp" + + s.state.Lock() + + // add some components to the state + csi1 := snap.NewComponentSideInfo(naming.NewComponentRef(snapName, compName), snap.R(1)) + csi2 := snap.NewComponentSideInfo(naming.NewComponentRef(snapName, "other-comp"), snap.R(33)) + cs1 := sequence.NewComponentState(csi1, snap.KernelModulesComponent) + cs2 := sequence.NewComponentState(csi2, snap.KernelModulesComponent) + setStateWithComponents(s.state, snapName, snapRev, []*sequence.ComponentState{cs1, cs2}) + + t := s.state.NewTask("prepare-kernel-modules-components", "test kernel modules") + t.Set("snap-setup", &snapstate.SnapSetup{ + SideInfo: &snap.SideInfo{ + RealName: snapName, + Revision: snapRev, + }, + }) + compRev := snap.R(7) + cref := naming.NewComponentRef(snapName, compName) + csi := snap.NewComponentSideInfo(cref, compRev) + t.Set("component-setup", + snapstate.NewComponentSetup(csi, snap.KernelModulesComponent, "")) + chg := s.state.NewChange("test change", "change desc") + chg.AddTask(t) + + s.state.Unlock() + + s.se.Ensure() + s.se.Wait() + + s.state.Lock() + if errStr == "" { + c.Check(chg.Err(), IsNil) + c.Check(t.Status(), Equals, state.DoneStatus) + } else { + c.Check(chg.Err().Error(), Equals, errStr) + c.Check(t.Status(), Equals, state.ErrorStatus) + } + s.state.Unlock() + + c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ + { + op: "setup-kernel-modules-components", + compsToInstall: []*snap.ComponentSideInfo{csi}, + currentComps: []*snap.ComponentSideInfo{csi1, csi2}, + }, + }) +} + +func (s *setupKernelComponentsSuite) TestRemoveKernelModulesSetup(c *C) { + s.testRemoveKernelModulesSetup(c, "mykernel", "") +} + +func (s *setupKernelComponentsSuite) TestRemoveKernelModulesSetupFails(c *C) { + s.testRemoveKernelModulesSetup(c, "mykernel+reverterr", + "(?s).*cannot remove set-up of kernel-modules for 
mykernel\\+reverterr.*") +} + +func (s *setupKernelComponentsSuite) testRemoveKernelModulesSetup(c *C, snapName, errStr string) { + snapRev := snap.R(77) + const compName = "kcomp" + + s.state.Lock() + + // add some components to the state + csi1 := snap.NewComponentSideInfo(naming.NewComponentRef(snapName, compName), snap.R(1)) + csi2 := snap.NewComponentSideInfo(naming.NewComponentRef(snapName, "other-comp"), snap.R(33)) + cs1 := sequence.NewComponentState(csi1, snap.KernelModulesComponent) + cs2 := sequence.NewComponentState(csi2, snap.KernelModulesComponent) + setStateWithComponents(s.state, snapName, snapRev, []*sequence.ComponentState{cs1, cs2}) + + t := s.state.NewTask("prepare-kernel-modules-components", "test kernel modules") + t.Set("snap-setup", &snapstate.SnapSetup{ + SideInfo: &snap.SideInfo{ + RealName: snapName, + Revision: snapRev, + }, + }) + compRev := snap.R(7) + cref := naming.NewComponentRef(snapName, compName) + csi := snap.NewComponentSideInfo(cref, compRev) + t.Set("component-setup", + snapstate.NewComponentSetup(csi, snap.KernelModulesComponent, "")) + chg := s.state.NewChange("test change", "change desc") + chg.AddTask(t) + + terr := s.state.NewTask("error-trigger", "provoking undo") + terr.WaitFor(t) + chg.AddTask(terr) + + s.state.Unlock() + + for i := 0; i < 3; i++ { + s.se.Ensure() + s.se.Wait() + } + + s.state.Lock() + + if errStr == "" { + c.Check(chg.Err(), ErrorMatches, "(?s)cannot perform the following tasks:\n"+ + "- provoking undo \\(error out\\)") + c.Check(t.Status(), Equals, state.UndoneStatus) + } else { + c.Check(chg.Err(), ErrorMatches, "(?s).* provoking undo \\(error out\\).*") + c.Check(chg.Err(), ErrorMatches, errStr) + c.Check(t.Status(), Equals, state.ErrorStatus) + } + + s.state.Unlock() + + c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ + { + op: "setup-kernel-modules-components", + compsToInstall: []*snap.ComponentSideInfo{csi}, + currentComps: []*snap.ComponentSideInfo{csi1, csi2}, + }, + { + op: "remove-kernel-modules-components-setup", + compsToRemove: []*snap.ComponentSideInfo{csi}, + finalComps: []*snap.ComponentSideInfo{csi1, csi2}, + }, + }) +} diff -Nru snapd-2.62+23.10/overlord/snapstate/handlers_components_link_test.go snapd-2.63+23.10/overlord/snapstate/handlers_components_link_test.go --- snapd-2.62+23.10/overlord/snapstate/handlers_components_link_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/handlers_components_link_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,8 +20,10 @@ package snapstate_test import ( + "path/filepath" "time" + "github.com/snapcore/snapd/dirs" "github.com/snapcore/snapd/overlord/snapstate" "github.com/snapcore/snapd/overlord/snapstate/sequence" "github.com/snapcore/snapd/overlord/snapstate/snapstatetest" @@ -80,6 +82,15 @@ cs := sequence.NewComponentState(csi, snap.TestComponent) t.Get("linked-component", &storedCs) c.Assert(&storedCs, DeepEquals, cs) + // the link has been created + c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ + { + op: "link-component", + path: filepath.Join( + dirs.SnapMountDir, snapName, "components", + "mnt", compName, compRev.String()), + }, + }) // state is modified as expected var snapst snapstate.SnapState c.Assert(snapstate.Get(s.state, snapName, &snapst), IsNil) @@ -154,6 +165,21 @@ cs := sequence.NewComponentState(csi, snap.TestComponent) t.Get("linked-component", &storedCs) c.Assert(&storedCs, DeepEquals, cs) + // the link has been created and then removed + c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ + { + op: 
"link-component", + path: filepath.Join( + dirs.SnapMountDir, snapName, "components", + "mnt", compName, compRev.String()), + }, + { + op: "unlink-component", + path: filepath.Join( + dirs.SnapMountDir, snapName, "components", + "mnt", compName, compRev.String()), + }, + }) // the component is not in the state var snapst snapstate.SnapState c.Assert(snapstate.Get(s.state, snapName, &snapst), IsNil) @@ -211,10 +237,19 @@ c.Check(chg.Err(), IsNil) // undo information has been stored - var storedCs sequence.ComponentState - cs := sequence.NewComponentState(csi, snap.TestComponent) - t.Get("unlinked-component", &storedCs) - c.Assert(&storedCs, DeepEquals, cs) + var unlinkedComp sequence.ComponentState + c.Assert(t.Get("unlinked-component", &unlinkedComp), IsNil) + c.Assert(&unlinkedComp, DeepEquals, + sequence.NewComponentState(csi, snap.TestComponent)) + // the link has been removed + c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ + { + op: "unlink-component", + path: filepath.Join( + dirs.SnapMountDir, snapName, "components", + "mnt", compName, compRev.String()), + }, + }) // state is modified as expected var snapst snapstate.SnapState c.Assert(snapstate.Get(s.state, snapName, &snapst), IsNil) @@ -256,6 +291,68 @@ s.testDoUnlinkCurrentComponent(c, snapName, snapRev, compName, compRev) } +func (s *linkCompSnapSuite) TestDoUnlinkCurrentComponentTwoTasks(c *C) { + const snapName = "mysnap" + const compName = "mycomp" + snapRev := snap.R(1) + compRev := snap.R(7) + + s.state.Lock() + // state must contain the component + setStateWithOneComponent(s.state, snapName, snapRev, compName, compRev) + s.state.Unlock() + + si := createTestSnapInfoForComponent(c, snapName, snapRev, compName) + ssu := createTestSnapSetup(si, snapstate.Flags{}) + + s.state.Lock() + + ts := s.state.NewTask("nop", "first task") + t := s.state.NewTask("unlink-current-component", "task desc") + t.WaitFor(ts) + cref := naming.NewComponentRef(snapName, compName) + csi := snap.NewComponentSideInfo(cref, compRev) + ts.Set("component-setup", snapstate.NewComponentSetup(csi, snap.TestComponent, "")) + ts.Set("snap-setup", ssu) + t.Set("snap-setup-task", ts.ID()) + t.Set("component-setup-task", ts.ID()) + chg := s.state.NewChange("test change", "change desc") + chg.AddTask(ts) + chg.AddTask(t) + + s.state.Unlock() + + for i := 0; i < 3; i++ { + s.se.Ensure() + s.se.Wait() + } + + s.state.Lock() + + c.Check(chg.Err(), IsNil) + // undo information has been stored in the setup task + var unlinkedComp sequence.ComponentState + c.Assert(ts.Get("unlinked-component", &unlinkedComp), IsNil) + c.Assert(&unlinkedComp, DeepEquals, + sequence.NewComponentState(csi, snap.TestComponent)) + // the link has been removed + c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ + { + op: "unlink-component", + path: filepath.Join( + dirs.SnapMountDir, snapName, "components", + "mnt", compName, compRev.String()), + }, + }) + // state is modified as expected + var snapst snapstate.SnapState + c.Assert(snapstate.Get(s.state, snapName, &snapst), IsNil) + c.Assert(snapst.CurrentComponentSideInfo(cref), IsNil) + c.Assert(t.Status(), Equals, state.DoneStatus) + + s.state.Unlock() +} + func (s *linkCompSnapSuite) testDoUnlinkThenUndoUnlinkCurrentComponent(c *C, snapName string, snapRev snap.Revision, compName string, compRev snap.Revision) { si := createTestSnapInfoForComponent(c, snapName, snapRev, compName) ssu := createTestSnapSetup(si, snapstate.Flags{}) @@ -286,10 +383,25 @@ c.Check(chg.Err().Error(), Equals, "cannot perform the following tasks:\n"+ "- provoking 
undo link (error out)") // undo information was stored - var storedCs sequence.ComponentState - cs := sequence.NewComponentState(csi, snap.TestComponent) - t.Get("unlinked-component", &storedCs) - c.Assert(&storedCs, DeepEquals, cs) + var unlinkedComp sequence.ComponentState + c.Assert(t.Get("unlinked-component", &unlinkedComp), IsNil) + c.Assert(&unlinkedComp, DeepEquals, + sequence.NewComponentState(csi, snap.TestComponent)) + // the link has been removed and then re-created + c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ + { + op: "unlink-component", + path: filepath.Join( + dirs.SnapMountDir, snapName, "components", + "mnt", compName, compRev.String()), + }, + { + op: "link-component", + path: filepath.Join( + dirs.SnapMountDir, snapName, "components", + "mnt", compName, compRev.String()), + }, + }) // the component is still in the state var snapst snapstate.SnapState c.Assert(snapstate.Get(s.state, snapName, &snapst), IsNil) diff -Nru snapd-2.62+23.10/overlord/snapstate/handlers_components_mount_test.go snapd-2.63+23.10/overlord/snapstate/handlers_components_mount_test.go --- snapd-2.62+23.10/overlord/snapstate/handlers_components_mount_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/handlers_components_mount_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -76,7 +76,9 @@ // Ensure backend calls have happened with the expected data c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ { - op: "setup-component", + op: "setup-component", + containerName: "mysnap+mycomp", + containerFileName: "mysnap+mycomp_7.comp", }, }) // File not removed @@ -127,13 +129,19 @@ // ensure undo was called the right way c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ { - op: "setup-component", + op: "setup-component", + containerName: "mysnap+mycomp", + containerFileName: "mysnap+mycomp_7.comp", }, { - op: "undo-setup-component", + op: "undo-setup-component", + containerName: "mysnap+mycomp", + containerFileName: "mysnap+mycomp_7.comp", }, { - op: "remove-component-dir", + op: "remove-component-dir", + containerName: "mysnap+mycomp", + containerFileName: "mysnap+mycomp_7.comp", }, }) } @@ -177,10 +185,14 @@ // ensure undo was called the right way c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ { - op: "setup-component", + op: "setup-component", + containerName: "mysnap+broken", + containerFileName: "mysnap+broken_7.comp", }, { - op: "remove-component-dir", + op: "remove-component-dir", + containerName: "mysnap+broken", + containerFileName: "mysnap+broken_7.comp", }, }) } @@ -231,10 +243,14 @@ // ensure undo was called the right way c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ { - op: "setup-component", + op: "setup-component", + containerName: "mysnap+brokenundo", + containerFileName: "mysnap+brokenundo_7.comp", }, { - op: "undo-setup-component", + op: "undo-setup-component", + containerName: "mysnap+brokenundo", + containerFileName: "mysnap+brokenundo_7.comp", }, }) } @@ -278,13 +294,19 @@ // ensure undo was called the right way c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ { - op: "setup-component", + op: "setup-component", + containerName: "mysnap+mycomp", + containerFileName: "mysnap+mycomp_7.comp", }, { - op: "undo-setup-component", + op: "undo-setup-component", + containerName: "mysnap+mycomp", + containerFileName: "mysnap+mycomp_7.comp", }, { - op: "remove-component-dir", + op: "remove-component-dir", + containerName: "mysnap+mycomp", + containerFileName: "mysnap+mycomp_7.comp", }, }) } diff -Nru 
snapd-2.62+23.10/overlord/snapstate/handlers_components_prepare_test.go snapd-2.63+23.10/overlord/snapstate/handlers_components_prepare_test.go --- snapd-2.62+23.10/overlord/snapstate/handlers_components_prepare_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/handlers_components_prepare_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -68,3 +68,42 @@ )) c.Check(t.Status(), Equals, state.DoneStatus) } + +func (s *prepareSnapSuite) TestDoPrepareComponentAlreadyPresent(c *C) { + const snapName = "mysnap" + const compName = "mycomp" + snapRev := snap.R(1) + // Unset component revision + compRev := snap.R(0) + si := createTestSnapInfoForComponent(c, snapName, snapRev, compName) + ssu := createTestSnapSetup(si, snapstate.Flags{}) + + s.state.Lock() + + // state with some component around already + setStateWithOneComponent(s.state, snapName, snapRev, compName, snap.R(-1)) + + t := s.state.NewTask("prepare-component", "task desc") + cref := naming.NewComponentRef(snapName, compName) + csi := snap.NewComponentSideInfo(cref, compRev) + t.Set("component-setup", snapstate.NewComponentSetup(csi, snap.TestComponent, "path-to-component")) + t.Set("snap-setup", ssu) + + s.state.NewChange("test change", "change desc").AddTask(t) + + s.state.Unlock() + + s.se.Ensure() + s.se.Wait() + + s.state.Lock() + defer s.state.Unlock() + + var csup snapstate.ComponentSetup + t.Get("component-setup", &csup) + // Revision should have been set to x2 (-2) + c.Check(csup.CompSideInfo, DeepEquals, snap.NewComponentSideInfo( + cref, snap.R(-2), + )) + c.Check(t.Status(), Equals, state.DoneStatus) +} diff -Nru snapd-2.62+23.10/overlord/snapstate/handlers_components_test.go snapd-2.63+23.10/overlord/snapstate/handlers_components_test.go --- snapd-2.62+23.10/overlord/snapstate/handlers_components_test.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/handlers_components_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,95 @@ +// -*- Mode: Go; indent-tabs-mode: t -*- + +/* + * Copyright (C) 2024 Canonical Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package snapstate_test + +import ( + "github.com/snapcore/snapd/overlord/snapstate" + "github.com/snapcore/snapd/overlord/snapstate/snapstatetest" + "github.com/snapcore/snapd/snap" + "github.com/snapcore/snapd/snap/naming" + . 
"gopkg.in/check.v1" +) + +type handlersComponentsSuite struct { + baseHandlerSuite +} + +var _ = Suite(&handlersComponentsSuite{}) + +func (s *handlersComponentsSuite) SetUpTest(c *C) { + s.baseHandlerSuite.SetUpTest(c) + s.AddCleanup(snapstatetest.MockDeviceModel(DefaultModel())) +} + +func (s *handlersComponentsSuite) TestComponentSetupTaskFirstTask(c *C) { + s.state.Lock() + defer s.state.Unlock() + + // make a new task which will be the component-setup-task for + // other tasks and write a ComponentSetup to it + t := s.state.NewTask("prepare-component", "test") + const snapName = "mysnap" + const compName = "mycomp" + compRev := snap.R(7) + cref := naming.NewComponentRef(snapName, compName) + csi := snap.NewComponentSideInfo(cref, compRev) + compsup := snapstate.NewComponentSetup(csi, snap.KernelModulesComponent, "") + t.Set("component-setup", compsup) + s.state.NewChange("sample", "...").AddTask(t) + + // Check that the returned task contains the data + setupTask, err := snapstate.ComponentSetupTask(t) + c.Assert(err, IsNil) + var newcompsup snapstate.ComponentSetup + err = setupTask.Get("component-setup", &newcompsup) + c.Assert(err, IsNil) +} + +func (s *handlersComponentsSuite) TestComponentSetupTaskLaterTask(c *C) { + s.state.Lock() + defer s.state.Unlock() + t := s.state.NewTask("prepare-component", "test") + + const snapName = "mysnap" + const compName = "mycomp" + compRev := snap.R(7) + cref := naming.NewComponentRef(snapName, compName) + csi := snap.NewComponentSideInfo(cref, compRev) + compsup := snapstate.NewComponentSetup(csi, snap.KernelModulesComponent, "") + // setup component-setup for the first task + t.Set("component-setup", compsup) + + // make a new task and reference the first one in component-setup-task + t2 := s.state.NewTask("next-task-comp", "test2") + t2.Set("component-setup-task", t.ID()) + + chg := s.state.NewChange("sample", "...") + chg.AddTask(t) + chg.AddTask(t2) + + // Check that the returned task contains the data + setupTask, err := snapstate.ComponentSetupTask(t2) + c.Assert(err, IsNil) + var newcompsup snapstate.ComponentSetup + err = setupTask.Get("component-setup", &newcompsup) + c.Assert(err, IsNil) + // and is the expected task + c.Assert(setupTask.ID(), Equals, t.ID()) +} diff -Nru snapd-2.62+23.10/overlord/snapstate/handlers_link_test.go snapd-2.63+23.10/overlord/snapstate/handlers_link_test.go --- snapd-2.62+23.10/overlord/snapstate/handlers_link_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/handlers_link_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "time" @@ -339,7 +338,7 @@ c.Assert(err, IsNil) // and check that the sequence file got updated - seqContent, err := ioutil.ReadFile(filepath.Join(dirs.SnapSeqDir, "foo.json")) + seqContent, err := os.ReadFile(filepath.Join(dirs.SnapSeqDir, "foo.json")) c.Assert(err, IsNil) c.Check(string(seqContent), Equals, `{"sequence":[{"name":"foo","snap-id":"","revision":"11"},{"name":"foo","snap-id":"","revision":"33"}],"current":"33","migrated-hidden":false,"migrated-exposed-home":false}`) } @@ -404,7 +403,7 @@ c.Check(t.Status(), Equals, state.UndoneStatus) // and check that the sequence file got updated - seqContent, err := ioutil.ReadFile(filepath.Join(dirs.SnapSeqDir, "foo.json")) + seqContent, err := os.ReadFile(filepath.Join(dirs.SnapSeqDir, "foo.json")) c.Assert(err, IsNil) c.Check(string(seqContent), Equals, 
`{"sequence":[],"current":"unset","migrated-hidden":false,"migrated-exposed-home":false}`) diff -Nru snapd-2.62+23.10/overlord/snapstate/handlers_setup_kernel_test.go snapd-2.63+23.10/overlord/snapstate/handlers_setup_kernel_test.go --- snapd-2.62+23.10/overlord/snapstate/handlers_setup_kernel_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/handlers_setup_kernel_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -46,7 +46,7 @@ s.state.Lock() - t := s.state.NewTask("setup-kernel-snap", "test kernel setup") + t := s.state.NewTask("prepare-kernel-snap", "test kernel setup") t.Set("snap-setup", &snapstate.SnapSetup{ SideInfo: &snap.SideInfo{ RealName: "mykernel", @@ -65,14 +65,57 @@ s.state.Lock() c.Check(chg.Err(), IsNil) c.Check(t.Status(), Equals, state.DoneStatus) - var prevRev snap.Revision - c.Check(chg.Get("previous-kernel-rev", &prevRev), IsNil) - c.Check(prevRev, Equals, snap.R(0)) + var prevKernelRev snap.Revision + c.Check(t.Get("previous-kernel-rev", &prevKernelRev), IsNil) + c.Check(prevKernelRev, Equals, snap.R(0)) s.state.Unlock() c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ { - op: "setup-kernel-snap", + op: "prepare-kernel-snap", + }, + }) +} + +func (s *setupKernelSnapSuite) TestSetupKernelSnapTwoTasks(c *C) { + v1 := "name: mykernel\nversion: 1.0\ntype: kernel\n" + testSnap := snaptest.MakeTestSnapWithFiles(c, v1, nil) + + s.state.Lock() + + ts := s.state.NewTask("nop", "first task") + ts.Set("snap-setup", &snapstate.SnapSetup{ + SideInfo: &snap.SideInfo{ + RealName: "mykernel", + Revision: snap.R(33), + }, + SnapPath: testSnap, + }) + t := s.state.NewTask("prepare-kernel-snap", "test kernel setup") + t.Set("snap-setup-task", ts.ID()) + t.WaitFor(ts) + chg := s.state.NewChange("test change", "change desc") + chg.AddTask(ts) + chg.AddTask(t) + + s.state.Unlock() + + for i := 0; i < 3; i++ { + s.se.Ensure() + s.se.Wait() + } + + s.state.Lock() + c.Check(chg.Err(), IsNil) + c.Check(t.Status(), Equals, state.DoneStatus) + var prevKernelRev snap.Revision + c.Check(ts.Get("previous-kernel-rev", &prevKernelRev), IsNil) + c.Check(prevKernelRev, Equals, snap.R(0)) + s.state.Unlock() + + c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ + { + op: "prepare-kernel-snap", }, }) } @@ -83,7 +126,7 @@ s.state.Lock() - t := s.state.NewTask("setup-kernel-snap", "test kernel setup") + t := s.state.NewTask("prepare-kernel-snap", "test kernel setup") t.Set("snap-setup", &snapstate.SnapSetup{ SideInfo: &snap.SideInfo{ RealName: "mykernel", @@ -107,14 +150,14 @@ s.state.Lock() c.Check(chg.Err(), ErrorMatches, `(?s).*provoking undo kernel setup.*`) c.Check(t.Status(), Equals, state.UndoneStatus) - var prevRev snap.Revision - c.Check(chg.Get("previous-kernel-rev", &prevRev), IsNil) - c.Check(prevRev, Equals, snap.R(0)) + var prevKernelRev snap.Revision + c.Check(t.Get("previous-kernel-rev", &prevKernelRev), IsNil) + c.Check(prevKernelRev, Equals, snap.R(0)) s.state.Unlock() c.Check(s.fakeBackend.ops, DeepEquals, fakeOps{ { - op: "setup-kernel-snap", + op: "prepare-kernel-snap", }, { op: "remove-kernel-snap-setup", @@ -135,7 +178,7 @@ Current: snap.R(33), UserID: 1, }) - t := s.state.NewTask("remove-old-kernel-snap-setup", "test remove kernel set-up") + t := s.state.NewTask("discard-old-kernel-snap-setup", "test discard kernel set-up") t.Set("snap-setup", &snapstate.SnapSetup{ SideInfo: &snap.SideInfo{ RealName: "mykernel", @@ -143,9 +186,9 @@ }, SnapPath: testSnap, }) + t.Set("previous-kernel-rev", snap.R(30)) chg := s.state.NewChange("test change", "change desc") 
chg.AddTask(t) - chg.Set("previous-kernel-rev", snap.R(33)) s.state.Unlock() @@ -177,7 +220,7 @@ Current: snap.R(33), UserID: 1, }) - t := s.state.NewTask("remove-old-kernel-snap-setup", "test kernel setup") + t := s.state.NewTask("discard-old-kernel-snap-setup", "test discard setup") t.Set("snap-setup", &snapstate.SnapSetup{ SideInfo: &snap.SideInfo{ RealName: "mykernel", @@ -185,12 +228,12 @@ }, SnapPath: testSnap, }) + t.Set("previous-kernel-rev", snap.R(30)) chg := s.state.NewChange("test change", "change desc") chg.AddTask(t) terr := s.state.NewTask("error-trigger", "provoking undo kernel cleanup") terr.WaitFor(t) chg.AddTask(terr) - chg.Set("previous-kernel-rev", snap.R(33)) s.state.Unlock() @@ -209,7 +252,7 @@ op: "remove-kernel-snap-setup", }, { - op: "setup-kernel-snap", + op: "prepare-kernel-snap", }, }) } diff -Nru snapd-2.62+23.10/overlord/snapstate/sequence/sequence.go snapd-2.63+23.10/overlord/snapstate/sequence/sequence.go --- snapd-2.62+23.10/overlord/snapstate/sequence/sequence.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/sequence/sequence.go 2024-04-24 00:00:39.000000000 +0000 @@ -148,8 +148,9 @@ } revSt := snapSeq.Revisions[snapIdx] - if revSt.FindComponent(cs.SideInfo.Component) != nil { - // Component already present + if currentCompSt := revSt.FindComponent(cs.SideInfo.Component); currentCompSt != nil { + // Component already present, replace revision + *currentCompSt = *cs return nil } @@ -205,3 +206,24 @@ } return false } + +func (snapSeq *SnapSequence) ComponentsForRevision(rev snap.Revision) []*ComponentState { + for _, rss := range snapSeq.Revisions { + if rss.Snap.Revision == rev { + return rss.Components + } + } + return nil +} + +func (snapSeq *SnapSequence) ComponentsWithTypeForRev(rev snap.Revision, compType snap.ComponentType) []*snap.ComponentSideInfo { + comps := snapSeq.ComponentsForRevision(rev) + kmodComps := make([]*snap.ComponentSideInfo, 0, len(comps)) + for _, comp := range comps { + if comp.CompType != snap.KernelModulesComponent { + continue + } + kmodComps = append(kmodComps, comp.SideInfo) + } + return kmodComps +} diff -Nru snapd-2.62+23.10/overlord/snapstate/sequence/sequence_test.go snapd-2.63+23.10/overlord/snapstate/sequence/sequence_test.go --- snapd-2.62+23.10/overlord/snapstate/sequence/sequence_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/sequence/sequence_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -102,23 +102,26 @@ const compName1 = "comp1" csi1 := snap.NewComponentSideInfo(naming.NewComponentRef(snapName, compName1), snap.R(2)) csi2 := snap.NewComponentSideInfo(naming.NewComponentRef(snapName, compName1), snap.R(3)) + csi3 := snap.NewComponentSideInfo(naming.NewComponentRef(snapName, "other-comp"), snap.R(1)) cs1 := sequence.NewComponentState(csi1, snap.TestComponent) cs2 := sequence.NewComponentState(csi2, snap.TestComponent) + cs3 := sequence.NewComponentState(csi3, snap.TestComponent) ssi := &snap.SideInfo{RealName: snapName, Revision: snap.R(1), SnapID: "some-snap-id"} - comps := []*sequence.ComponentState{cs1, cs2} + sliceCs1 := []*sequence.ComponentState{cs1} seq := snapstatetest.NewSequenceFromRevisionSideInfos( - []*sequence.RevisionSideState{sequence.NewRevisionSideState(ssi, comps)}) + []*sequence.RevisionSideState{sequence.NewRevisionSideState(ssi, sliceCs1)}) c.Assert(seq.AddComponentForRevision(snapRev, cs1), IsNil) // Not re-appended - c.Assert(seq.Revisions[0].Components, DeepEquals, comps) + c.Assert(seq.Revisions[0].Components, DeepEquals, 
sliceCs1) + + // Replace component with different revision + c.Assert(seq.AddComponentForRevision(snapRev, cs2), IsNil) + c.Assert(seq.Revisions[0].Components, DeepEquals, []*sequence.ComponentState{cs2}) - csi3 := snap.NewComponentSideInfo(naming.NewComponentRef(snapName, "other-comp"), snap.R(1)) - cs3 := sequence.NewComponentState(csi3, snap.TestComponent) c.Assert(seq.AddComponentForRevision(snapRev, cs3), IsNil) - comps = []*sequence.ComponentState{cs1, cs2, cs3} - c.Assert(seq.Revisions[0].Components, DeepEquals, comps) + c.Assert(seq.Revisions[0].Components, DeepEquals, []*sequence.ComponentState{cs2, cs3}) c.Assert(seq.AddComponentForRevision(snap.R(2), cs3), Equals, sequence.ErrSnapRevNotInSequence) } @@ -170,21 +173,26 @@ cref2 := naming.NewComponentRef(snapName, compName2) csi2 := snap.NewComponentSideInfo(cref2, compRev) + rev1Comps := []*sequence.ComponentState{ + sequence.NewComponentState(csi2, snap.TestComponent), + sequence.NewComponentState(csi, snap.TestComponent)} seq := snapstatetest.NewSequenceFromRevisionSideInfos( []*sequence.RevisionSideState{ - sequence.NewRevisionSideState(ssi, - []*sequence.ComponentState{sequence.NewComponentState(csi2, snap.TestComponent), sequence.NewComponentState(csi, snap.TestComponent)})}) + sequence.NewRevisionSideState(ssi, rev1Comps)}) c.Check(seq.IsComponentRevPresent(csi), Equals, true) foundCsi := seq.ComponentSideInfoForRev(0, cref) c.Check(foundCsi, DeepEquals, csi) foundCsi2 := seq.ComponentSideInfoForRev(0, cref2) c.Check(foundCsi2, DeepEquals, csi2) + c.Check(seq.ComponentsForRevision(snapRev), DeepEquals, rev1Comps) + rev1Comps = []*sequence.ComponentState{ + sequence.NewComponentState(csi, snap.TestComponent)} seq = snapstatetest.NewSequenceFromRevisionSideInfos( []*sequence.RevisionSideState{ sequence.NewRevisionSideState(ssi2, nil), - sequence.NewRevisionSideState(ssi, []*sequence.ComponentState{sequence.NewComponentState(csi, snap.TestComponent)}), + sequence.NewRevisionSideState(ssi, rev1Comps), }) c.Check(seq.IsComponentRevPresent(csi), Equals, true) @@ -192,6 +200,8 @@ c.Check(seq.ComponentSideInfoForRev(0, cref2), IsNil) foundCsi = seq.ComponentSideInfoForRev(0, cref) c.Check(foundCsi, IsNil) + c.Check(seq.ComponentsForRevision(snapRev), DeepEquals, rev1Comps) + c.Check(seq.ComponentsForRevision(snapRev2), IsNil) seq = snapstatetest.NewSequenceFromRevisionSideInfos( []*sequence.RevisionSideState{ @@ -203,3 +213,33 @@ c.Check(seq.ComponentSideInfoForRev(0, cref), IsNil) c.Check(seq.ComponentSideInfoForRev(1, cref2), IsNil) } + +func (s *sequenceTestSuite) TestKernelModulesComponentsForRev(c *C) { + const snapName = "mysnap" + const compName = "mycomp" + const compName2 = "mycomp2" + snapRev := snap.R(1) + snapRev2 := snap.R(2) + compRev := snap.R(33) + + ssi := &snap.SideInfo{RealName: snapName, Revision: snapRev, SnapID: "some-snap-id"} + ssi2 := &snap.SideInfo{RealName: snapName, Revision: snapRev2, SnapID: "some-snap-id"} + cref := naming.NewComponentRef(snapName, compName) + csi := snap.NewComponentSideInfo(cref, compRev) + cref2 := naming.NewComponentRef(snapName, compName2) + csi2 := snap.NewComponentSideInfo(cref2, compRev) + + rev1Comps := []*sequence.ComponentState{ + sequence.NewComponentState(csi2, snap.KernelModulesComponent), + sequence.NewComponentState(csi, snap.TestComponent)} + seq := snapstatetest.NewSequenceFromRevisionSideInfos( + []*sequence.RevisionSideState{ + sequence.NewRevisionSideState(ssi2, nil), + sequence.NewRevisionSideState(ssi, rev1Comps), + }) + + 
c.Check(seq.ComponentsWithTypeForRev(snapRev, snap.KernelModulesComponent), + DeepEquals, []*snap.ComponentSideInfo{csi2}) + c.Check(len(seq.ComponentsWithTypeForRev(snapRev2, snap.KernelModulesComponent)), + Equals, 0) +} diff -Nru snapd-2.62+23.10/overlord/snapstate/snapmgr.go snapd-2.63+23.10/overlord/snapstate/snapmgr.go --- snapd-2.62+23.10/overlord/snapstate/snapmgr.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/snapmgr.go 2024-04-24 00:00:39.000000000 +0000 @@ -521,7 +521,7 @@ } cpi := snap.MinimalComponentContainerPlaceInfo(csi.Component.ComponentName, - csi.Revision, si.InstanceName(), si.SnapRevision()) + csi.Revision, si.InstanceName()) return readComponentInfo(cpi.MountDir()) } @@ -643,8 +643,8 @@ runner.AddHandler("conditional-auto-refresh", m.doConditionalAutoRefresh, nil) // specific set-up for the kernel snap - runner.AddHandler("setup-kernel-snap", m.doSetupKernelSnap, m.undoSetupKernelSnap) - runner.AddHandler("remove-old-kernel-snap-setup", m.doCleanupOldKernelSnap, m.undoCleanupOldKernelSnap) + runner.AddHandler("prepare-kernel-snap", m.doSetupKernelSnap, m.undoSetupKernelSnap) + runner.AddHandler("discard-old-kernel-snap-setup", m.doCleanupOldKernelSnap, m.undoCleanupOldKernelSnap) // FIXME: drop the task entirely after a while // (having this wart here avoids yet-another-patch) @@ -681,6 +681,10 @@ runner.AddHandler("mount-component", m.doMountComponent, m.undoMountComponent) runner.AddHandler("unlink-current-component", m.doUnlinkCurrentComponent, m.undoUnlinkCurrentComponent) runner.AddHandler("link-component", m.doLinkComponent, m.undoLinkComponent) + // We cannot undo much after a component file is removed. And it is the + // last task anyway. + runner.AddHandler("discard-component", m.doDiscardComponent, nil) + runner.AddHandler("prepare-kernel-modules-components", m.doSetupKernelModules, m.doRemoveKernelModulesSetup) // control serialisation runner.AddBlocked(m.blockedTask) @@ -1293,6 +1297,11 @@ if err != nil { return err } + dev, err := DeviceCtx(m.state, nil, nil) + // Ignore error if model assertion not yet known + if err != nil && !errors.Is(err, state.ErrNoState) { + return err + } squashfsPath := dirs.StripRootDir(info.MountFile()) whereDir := dirs.StripRootDir(info.MountDir()) // Ensure mount files, but do not restart mount units @@ -1302,9 +1311,23 @@ // This is especially relevant for the snapd snap as if // this happens, it would end up in a bad state after // an update. + // TODO Ensure mounts of snap components as well + // TODO refactor so the check for kernel type is not repeated + // in the installation case + snapType, _ := snapSt.Type() + // We cannot ensure for this type yet as the mount unit + // flags depend on the model in this case. + if snapType == snap.TypeKernel && dev == nil { + continue + } if _, err = sysd.EnsureMountUnitFile(info.MountDescription(), squashfsPath, whereDir, "squashfs", - systemd.EnsureMountUnitFlags{PreventRestartIfModified: true}); err != nil { + systemd.EnsureMountUnitFlags{ + PreventRestartIfModified: true, + // We need early mounts only for UC20+/hybrid, also 16.04 + // systemd seems to be buggy if we enable this. 
+ StartBeforeDriversLoad: snapType == snap.TypeKernel && + dev.HasModeenv()}); err != nil { return err } } diff -Nru snapd-2.62+23.10/overlord/snapstate/snapstate.go snapd-2.63+23.10/overlord/snapstate/snapstate.go --- snapd-2.62+23.10/overlord/snapstate/snapstate.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/snapstate.go 2024-04-24 00:00:39.000000000 +0000 @@ -553,7 +553,7 @@ // This task is necessary only for UC20+ and hybrid if snapsup.Type == snap.TypeKernel && needsKernelSetup(deviceCtx) { - setupKernel := st.NewTask("setup-kernel-snap", fmt.Sprintf(i18n.G("Setup kernel driver tree for %q%s"), snapsup.InstanceName(), revisionStr)) + setupKernel := st.NewTask("prepare-kernel-snap", fmt.Sprintf(i18n.G("Prepare kernel driver tree for %q%s"), snapsup.InstanceName(), revisionStr)) addTask(setupKernel) prev = setupKernel } @@ -619,7 +619,7 @@ if snapsup.Type == snap.TypeKernel && needsKernelSetup(deviceCtx) { // This task needs to run after we're back and running the new // kernel after a reboot was requested in link-snap handler. - setupKernel := st.NewTask("remove-old-kernel-snap-setup", fmt.Sprintf(i18n.G("Cleanup kernel driver tree for %q%s"), snapsup.InstanceName(), revisionStr)) + setupKernel := st.NewTask("discard-old-kernel-snap-setup", fmt.Sprintf(i18n.G("Discard kernel driver tree for %q%s"), snapsup.InstanceName(), revisionStr)) addTask(setupKernel) prev = setupKernel } @@ -3146,7 +3146,7 @@ return nil, err } if needsKernelSetup(deviceCtx) { - setupKernel := st.NewTask("setup-kernel-snap", fmt.Sprintf(i18n.G("Setup kernel driver tree for %q (%s) for remodel"), snapsup.InstanceName(), snapst.Current)) + setupKernel := st.NewTask("prepare-kernel-snap", fmt.Sprintf(i18n.G("Prepare kernel driver tree for %q (%s) for remodel"), snapsup.InstanceName(), snapst.Current)) ts.AddTask(setupKernel) setupKernel.Set("snap-setup-task", prepareSnap.ID()) setupKernel.WaitFor(prev) @@ -3205,7 +3205,7 @@ return nil, err } if needsKernelSetup(deviceCtx) { - setupKernel := st.NewTask("setup-kernel-snap", fmt.Sprintf(i18n.G("Setup kernel driver tree for %q (%s) for remodel"), snapsup.InstanceName(), snapsup.Revision())) + setupKernel := st.NewTask("prepare-kernel-snap", fmt.Sprintf(i18n.G("Prepare kernel driver tree for %q (%s) for remodel"), snapsup.InstanceName(), snapsup.Revision())) setupKernel.Set("snap-setup-task", snapSetupTask.ID()) setupKernel.WaitFor(prev) ts.AddTask(setupKernel) diff -Nru snapd-2.62+23.10/overlord/snapstate/snapstate_install_test.go snapd-2.63+23.10/overlord/snapstate/snapstate_install_test.go --- snapd-2.62+23.10/overlord/snapstate/snapstate_install_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/snapstate_install_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -100,7 +100,7 @@ expected = append(expected, "unlink-current-snap") } if opts&updatesGadgetAssets != 0 && opts&needsKernelSetup != 0 { - expected = append(expected, "setup-kernel-snap") + expected = append(expected, "prepare-kernel-snap") } if opts&(updatesGadget|updatesGadgetAssets) != 0 { expected = append(expected, "update-gadget-assets") @@ -114,7 +114,7 @@ "link-snap", "auto-connect") if opts&updatesGadgetAssets != 0 && opts&needsKernelSetup != 0 { - expected = append(expected, "remove-old-kernel-snap-setup") + expected = append(expected, "discard-old-kernel-snap-setup") } expected = append(expected, "set-auto-aliases", diff -Nru snapd-2.62+23.10/overlord/snapstate/snapstate_test.go snapd-2.63+23.10/overlord/snapstate/snapstate_test.go --- 
snapd-2.62+23.10/overlord/snapstate/snapstate_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/snapstate_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -7975,7 +7975,7 @@ "start-snap-services", "run-hook[configure]", "run-hook[check-health]", - "remove-old-kernel-snap-setup", + "discard-old-kernel-snap-setup", } func kindsToSet(kinds []string) map[string]bool { @@ -8022,8 +8022,8 @@ if opts&needsKernelSetup != 0 { c.Assert(tasks, HasLen, 4) tSetupKernelSnap := tasks[1] - c.Assert(tSetupKernelSnap.Kind(), Equals, "setup-kernel-snap") - c.Assert(tSetupKernelSnap.Summary(), Equals, `Setup kernel driver tree for "some-kernel" (2) for remodel`) + c.Assert(tSetupKernelSnap.Kind(), Equals, "prepare-kernel-snap") + c.Assert(tSetupKernelSnap.Summary(), Equals, `Prepare kernel driver tree for "some-kernel" (2) for remodel`) c.Assert(tSetupKernelSnap.WaitTasks(), DeepEquals, []*state.Task{tPrepare}) tUpdateGadgetAssets = tasks[2] tLink = tasks[3] @@ -8144,8 +8144,8 @@ if opts&needsKernelSetup != 0 { c.Assert(tasks, HasLen, 5) tSetupKernelSnap := tasks[2] - c.Assert(tSetupKernelSnap.Kind(), Equals, "setup-kernel-snap") - c.Assert(tSetupKernelSnap.Summary(), Equals, `Setup kernel driver tree for "some-kernel" (2) for remodel`) + c.Assert(tSetupKernelSnap.Kind(), Equals, "prepare-kernel-snap") + c.Assert(tSetupKernelSnap.Summary(), Equals, `Prepare kernel driver tree for "some-kernel" (2) for remodel`) c.Assert(tSetupKernelSnap.WaitTasks(), DeepEquals, []*state.Task{ testTask, }) @@ -8912,7 +8912,6 @@ Description=Mount unit for test-snap, revision 42 After=snapd.mounts-pre.target Before=snapd.mounts.target -Before=local-fs.target [Mount] What=%s @@ -8960,7 +8959,6 @@ Description=Mount unit for test-snap, revision 42 After=snapd.mounts-pre.target Before=snapd.mounts.target -Before=local-fs.target [Mount] What=%s @@ -9029,7 +9027,6 @@ Description=Mount unit for test-snap, revision 42 After=snapd.mounts-pre.target Before=snapd.mounts.target -Before=local-fs.target [Mount] What=%s diff -Nru snapd-2.62+23.10/overlord/snapstate/snapstate_update_test.go snapd-2.63+23.10/overlord/snapstate/snapstate_update_test.go --- snapd-2.62+23.10/overlord/snapstate/snapstate_update_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/snapstate/snapstate_update_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,7 @@ "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "os" "path/filepath" "sort" @@ -8221,7 +8221,7 @@ c.Assert(err, IsNil) defer file.Close() - data, err := ioutil.ReadAll(file) + data, err := io.ReadAll(file) c.Assert(err, IsNil) // check sequence file has expected migration value @@ -10237,7 +10237,7 @@ c.Assert(tasks[0].Summary(), testutil.Contains, "Pre-download snap \"some-snap\" (2) from channel") } -func (s *snapmgrTestSuite) TestAutoRefreshRefreshInhibitNoticeRecorded(c *C) { +func (s *snapmgrTestSuite) testAutoRefreshRefreshInhibitNoticeRecorded(c *C, markerInterfaceConnected bool, warningFallback bool) { refreshAppsCheckCalled := 0 restore := snapstate.MockRefreshAppsCheck(func(si *snap.Info) error { refreshAppsCheckCalled++ @@ -10255,6 +10255,20 @@ }) defer restore() + var connCheckCalled int + restore = snapstate.MockHasActiveConnection(func(st *state.State, iface string) (bool, error) { + connCheckCalled++ + c.Check(iface, Equals, "snap-refresh-observe") + return markerInterfaceConnected, nil + }) + defer restore() + + // let's add some random warnings + s.state.Lock() + s.state.Warnf("this is a random warning 1") + s.state.Warnf("this is 
a random warning 2") + s.state.Unlock() + s.state.Lock() snapstate.Set(s.state, "some-snap", &snapstate.SnapState{ Active: true, @@ -10294,8 +10308,10 @@ c.Check(chgs[1].Status(), Equals, state.DoStatus) c.Check(chgs[0].Kind(), Equals, "auto-refresh") c.Check(chgs[0].Status(), Equals, state.DoStatus) - // No notices are recorded until auto-refresh change is marked as ready. + // No notices or warnings are recorded until auto-refresh change is marked as ready. checkRefreshInhibitNotice(c, s.state, 0) + // no "refresh inhibition" warnings recorded + checkNoRefreshInhibitWarning(c, s.state) s.settle(c) @@ -10317,8 +10333,33 @@ c.Check(chgs[0].Kind(), Equals, "auto-refresh") c.Check(chgs[0].Status(), Equals, state.DoneStatus) - // Aggregate notice is recorded when auto-refresh change is marked as ready. + // Aggregate notice and warning is recorded when auto-refresh change is marked as ready. checkRefreshInhibitNotice(c, s.state, 1) + if warningFallback { + checkRefreshInhibitWarning(c, s.state, []string{"some-snap"}, time.Time{}) + } else { + checkNoRefreshInhibitWarning(c, s.state) + } +} + +func (s *snapmgrTestSuite) TestAutoRefreshRefreshInhibitNoticeRecorded(c *C) { + s.enableRefreshAppAwarenessUX() + const markerInterfaceConnected = true + const warningFallback = false + s.testAutoRefreshRefreshInhibitNoticeRecorded(c, markerInterfaceConnected, warningFallback) +} + +func (s *snapmgrTestSuite) TestAutoRefreshRefreshInhibitNoticeRecordedWarningFallback(c *C) { + s.enableRefreshAppAwarenessUX() + const markerInterfaceConnected = false + const warningFallback = true + s.testAutoRefreshRefreshInhibitNoticeRecorded(c, markerInterfaceConnected, warningFallback) +} + +func (s *snapmgrTestSuite) TestAutoRefreshRefreshInhibitNoticeRecordedWarningFallbackNoRAAUX(c *C) { + const markerInterfaceConnected = false + const warningFallback = false + s.testAutoRefreshRefreshInhibitNoticeRecorded(c, markerInterfaceConnected, warningFallback) } func (s *snapmgrTestSuite) TestAutoRefreshRefreshInhibitNoticeRecordedOnPreDownloadOnly(c *C) { diff -Nru snapd-2.62+23.10/overlord/state/copy_test.go snapd-2.63+23.10/overlord/state/copy_test.go --- snapd-2.62+23.10/overlord/state/copy_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/state/copy_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,6 @@ package state_test import ( - "io/ioutil" "os" "path/filepath" @@ -94,7 +93,7 @@ c.Assert(err, IsNil) // and check that the right bits got copied - dstContent, err := ioutil.ReadFile(dstStateFile) + dstContent, err := os.ReadFile(dstStateFile) c.Assert(err, IsNil) c.Check(string(dstContent), Equals, `{"data":{"auth":{"last-id":1,"users":[{"id":1,"email":"some@user.com","macaroon":"1234","store-macaroon":"5678","store-discharges":["9012345"]}]}}`+stateSuffix) } @@ -117,7 +116,7 @@ err = state.CopyState(srcStateFile, dstStateFile, []string{"A.B", "no-existing-does-not-error", "E.F", "E", "I", "E.non-existing"}) c.Assert(err, IsNil) - dstContent, err := ioutil.ReadFile(dstStateFile) + dstContent, err := os.ReadFile(dstStateFile) c.Assert(err, IsNil) c.Check(string(dstContent), Equals, `{"data":{"A":{"B":[{"C":1},{"D":2}]},"E":{"F":2,"G":3},"I":null}`+stateSuffix) } @@ -141,7 +140,7 @@ err = state.CopyState(srcStateFile, dstStateFile, []string{"E", "E"}) c.Assert(err, IsNil) - dstContent, err := ioutil.ReadFile(dstStateFile) + dstContent, err := os.ReadFile(dstStateFile) c.Assert(err, IsNil) c.Check(string(dstContent), Equals, `{"data":{"E":{"F":2,"G":3}}`+stateSuffix) } diff -Nru 
snapd-2.62+23.10/overlord/state/export_test.go snapd-2.63+23.10/overlord/state/export_test.go --- snapd-2.62+23.10/overlord/state/export_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/state/export_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -45,15 +45,6 @@ t.readyTime = readyTime } -func (s *State) AddWarning(message string, lastAdded, lastShown time.Time, expireAfter, repeatAfter time.Duration) { - s.addWarning(Warning{ - message: message, - lastShown: lastShown, - expireAfter: expireAfter, - repeatAfter: repeatAfter, - }, lastAdded) -} - func (w Warning) LastAdded() time.Time { return w.lastAdded } @@ -67,11 +58,13 @@ } var ( + DefaultWarningExpireAfter = defaultWarningExpireAfter + DefaultWarningRepeatAfter = defaultWarningRepeatAfter + ErrNoWarningMessage = errNoWarningMessage ErrBadWarningMessage = errBadWarningMessage ErrNoWarningFirstAdded = errNoWarningFirstAdded ErrNoWarningExpireAfter = errNoWarningExpireAfter - ErrNoWarningRepeatAfter = errNoWarningRepeatAfter ) // NumNotices returns the total bumber of notices, including expired ones that diff -Nru snapd-2.62+23.10/overlord/state/notices.go snapd-2.63+23.10/overlord/state/notices.go --- snapd-2.62+23.10/overlord/state/notices.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/state/notices.go 2024-04-24 00:00:39.000000000 +0000 @@ -26,6 +26,8 @@ const ( // defaultNoticeExpireAfter is the default expiry time for notices. defaultNoticeExpireAfter = 7 * 24 * time.Hour + // maxNoticeKeyLength is the max size in bytes for a notice key. + maxNoticeKeyLength = 256 ) // Notice represents an aggregated notice. The combination of type and key is unique. @@ -199,11 +201,15 @@ // Recorded whenever an auto-refresh is inhibited for one or more snaps. RefreshInhibitNotice NoticeType = "refresh-inhibit" + + // Recorded by "snap run" command when it is inhibited from running a + // a snap due an ongoing refresh. + SnapRunInhibitNotice NoticeType = "snap-run-inhibit" ) func (t NoticeType) Valid() bool { switch t { - case ChangeUpdateNotice, WarningNotice, RefreshInhibitNotice: + case ChangeUpdateNotice, WarningNotice, RefreshInhibitNotice, SnapRunInhibitNotice: return true } return false @@ -228,9 +234,9 @@ if options == nil { options = &AddNoticeOptions{} } - err := validateNotice(noticeType, key, options) + err := ValidateNotice(noticeType, key, options) if err != nil { - return "", err + return "", fmt.Errorf("internal error: %w", err) } s.writing() @@ -279,15 +285,19 @@ return notice.id, nil } -func validateNotice(noticeType NoticeType, key string, options *AddNoticeOptions) error { +// ValidateNotice validates notice type and key before adding. 
+func ValidateNotice(noticeType NoticeType, key string, options *AddNoticeOptions) error { if !noticeType.Valid() { - return fmt.Errorf("internal error: attempted to add notice with invalid type %q", noticeType) + return fmt.Errorf("cannot add notice with invalid type %q", noticeType) } if key == "" { - return fmt.Errorf("internal error: attempted to add %s notice with invalid key %q", noticeType, key) + return fmt.Errorf("cannot add %s notice with invalid key %q", noticeType, key) + } + if len(key) > maxNoticeKeyLength { + return fmt.Errorf("cannot add %s notice with invalid key: key must be %d bytes or less", noticeType, maxNoticeKeyLength) } if noticeType == RefreshInhibitNotice && key != "-" { - return fmt.Errorf(`internal error: attempted to add %s notice with invalid key %q, only "-" key is supported`, noticeType, key) + return fmt.Errorf(`cannot add %s notice with invalid key %q: only "-" key is supported`, noticeType, key) } return nil } diff -Nru snapd-2.62+23.10/overlord/state/notices_test.go snapd-2.63+23.10/overlord/state/notices_test.go --- snapd-2.62+23.10/overlord/state/notices_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/state/notices_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,6 +20,7 @@ "encoding/json" "errors" "fmt" + "strings" "sync" "time" @@ -286,14 +287,16 @@ addNotice(c, st, nil, state.WarningNotice, "Warning 1!", nil) time.Sleep(time.Microsecond) addNotice(c, st, nil, state.WarningNotice, "Warning 2!", nil) + time.Sleep(time.Microsecond) + addNotice(c, st, nil, state.SnapRunInhibitNotice, "snap-name", nil) // No filter notices := st.Notices(nil) - c.Assert(notices, HasLen, 4) + c.Assert(notices, HasLen, 5) // No types notices = st.Notices(&state.NoticeFilter{}) - c.Assert(notices, HasLen, 4) + c.Assert(notices, HasLen, 5) // One type notices = st.Notices(&state.NoticeFilter{Types: []state.NoticeType{state.WarningNotice}}) @@ -323,6 +326,14 @@ c.Check(n["type"], Equals, "refresh-inhibit") c.Check(n["key"], Equals, "-") + // Another type + notices = st.Notices(&state.NoticeFilter{Types: []state.NoticeType{state.SnapRunInhibitNotice}}) + c.Assert(notices, HasLen, 1) + n = noticeToMap(c, notices[0]) + c.Check(n["user-id"], Equals, nil) + c.Check(n["type"], Equals, "snap-run-inhibit") + c.Check(n["key"], Equals, "snap-name") + // Multiple types notices = st.Notices(&state.NoticeFilter{Types: []state.NoticeType{ state.ChangeUpdateNotice, @@ -649,17 +660,22 @@ // Invalid type id, err := st.AddNotice(nil, "bad-type", "123", nil) - c.Check(err, ErrorMatches, `internal error: attempted to add notice with invalid type "bad-type"`) + c.Check(err, ErrorMatches, `internal error: cannot add notice with invalid type "bad-type"`) c.Check(id, Equals, "") // Empty key id, err = st.AddNotice(nil, state.ChangeUpdateNotice, "", nil) - c.Check(err, ErrorMatches, `internal error: attempted to add change-update notice with invalid key ""`) + c.Check(err, ErrorMatches, `internal error: cannot add change-update notice with invalid key ""`) + c.Check(id, Equals, "") + + // Large key + id, err = st.AddNotice(nil, state.ChangeUpdateNotice, strings.Repeat("x", 257), nil) + c.Check(err, ErrorMatches, `internal error: cannot add change-update notice with invalid key: key must be 256 bytes or less`) c.Check(id, Equals, "") // Unxpected key for refresh-inhibit notice id, err = st.AddNotice(nil, state.RefreshInhibitNotice, "123", nil) - c.Check(err, ErrorMatches, `internal error: attempted to add refresh-inhibit notice with invalid key "123", only "-" key is 
supported`) + c.Check(err, ErrorMatches, `internal error: cannot add refresh-inhibit notice with invalid key "123": only "-" key is supported`) c.Check(id, Equals, "") } diff -Nru snapd-2.62+23.10/overlord/state/state_test.go snapd-2.63+23.10/overlord/state/state_test.go --- snapd-2.62+23.10/overlord/state/state_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/state/state_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -833,7 +833,10 @@ state.MockTaskTimes(t5, now.Add(-pruneWait), now.Add(-pruneWait)) // two warnings, one expired - st.AddWarning("hello", now, never, time.Nanosecond, state.DefaultRepeatAfter) + st.AddWarning("hello", &state.AddWarningOptions{ + Time: now.Add(-state.DefaultWarningExpireAfter), + RepeatAfter: state.DefaultWarningRepeatAfter, + }) st.Warnf("hello again") past := time.Now().AddDate(-1, 0, 0) diff -Nru snapd-2.62+23.10/overlord/state/taskrunner.go snapd-2.63+23.10/overlord/state/taskrunner.go --- snapd-2.62+23.10/overlord/state/taskrunner.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/state/taskrunner.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2016-2022 Canonical Ltd + * Copyright (C) 2016-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -296,7 +296,7 @@ t.SetStatus(ErrorStatus) t.Errorf("%s", err) // ensure the error is available in the global log too - logger.Noticef("[change %s %q task] failed: %v", t.Change().ID(), t.Summary(), err) + logger.Noticef("Change %s task (%s) failed: %v", t.Change().ID(), t.Summary(), err) if r.taskErrorCallback != nil { r.taskErrorCallback(err) } diff -Nru snapd-2.62+23.10/overlord/state/taskrunner_test.go snapd-2.63+23.10/overlord/state/taskrunner_test.go --- snapd-2.62+23.10/overlord/state/taskrunner_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/state/taskrunner_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2016-2022 Canonical Ltd + * Copyright (C) 2016-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -1295,7 +1295,7 @@ c.Check(strings.Join(t1.Log(), ""), Matches, `.*handler error for "foo"`) c.Check(called, Equals, true) - c.Check(logbuf.String(), Matches, `(?m).*: \[change 1 "task summary" task\] failed: handler error for "foo".*`) + c.Check(logbuf.String(), Matches, `(?m).*: Change 1 task \(task summary\) failed: handler error for "foo".*`) } func (ts *taskRunnerSuite) TestErrorCallbackNotCalled(c *C) { diff -Nru snapd-2.62+23.10/overlord/state/warning.go snapd-2.63+23.10/overlord/state/warning.go --- snapd-2.62+23.10/overlord/state/warning.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/state/warning.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,7 @@ // -*- Mode: Go; indent-tabs-mode: t -*- /* - * Copyright (C) 2018 Canonical Ltd + * Copyright (C) 2018-2024 Canonical Ltd * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -31,14 +31,13 @@ ) var ( - DefaultRepeatAfter = time.Hour * 24 - DefaultExpireAfter = time.Hour * 24 * 28 + defaultWarningRepeatAfter = time.Hour * 24 + defaultWarningExpireAfter = time.Hour * 24 * 28 errNoWarningMessage = errors.New("warning has 
no message") errBadWarningMessage = errors.New("malformed warning message") errNoWarningFirstAdded = errors.New("warning has no first-added timestamp") errNoWarningExpireAfter = errors.New("warning has no expire-after duration") - errNoWarningRepeatAfter = errors.New("warning has no repeat-after duration") ) type jsonWarning struct { @@ -125,9 +124,7 @@ if w.expireAfter == 0 { return errNoWarningExpireAfter } - if w.repeatAfter == 0 { - return errNoWarningRepeatAfter - } + return nil } @@ -184,26 +181,66 @@ } else { message = template } - s.addWarning(Warning{ - message: message, - expireAfter: DefaultExpireAfter, - repeatAfter: DefaultRepeatAfter, - }, time.Now().UTC()) + s.AddWarning(message, &AddWarningOptions{ + RepeatAfter: defaultWarningRepeatAfter, + }) +} + +// AddWarningOptions holds optional parameters for an AddWarning call. +type AddWarningOptions struct { + // RepeatAfter defines how long after this warning was last shown we + // should allow it to repeat. Zero means always repeat. + RepeatAfter time.Duration + + // Time, if set, overrides time.Now() as the warning lastAdded time. + Time time.Time } -func (s *State) addWarning(w Warning, t time.Time) { +// AddWarning records a warning with the specified message and options. +func (s *State) AddWarning(message string, options *AddWarningOptions) { + if options == nil { + options = &AddWarningOptions{} + } + s.writing() - if s.warnings[w.message] == nil { - w.firstAdded = t - if err := w.validate(); err != nil { + now := options.Time + if now.IsZero() { + now = timeNow() + } + now = now.UTC() + + warning, ok := s.warnings[message] + if !ok { + warning = &Warning{ + message: message, + firstAdded: now, + expireAfter: defaultWarningExpireAfter, + } + if err := warning.validate(); err != nil { // programming error! logger.Panicf("internal error, please report: attempted to add invalid warning: %v", err) return } - s.warnings[w.message] = &w + s.warnings[message] = warning } - s.warnings[w.message].lastAdded = t + + warning.lastAdded = now + warning.repeatAfter = options.RepeatAfter +} + +// RemoveWarning removes a warning given its message. +// +// Returns state.ErrNoState if no warning exists with given message. 
+func (s *State) RemoveWarning(message string) error { + s.writing() + _, ok := s.warnings[message] + if !ok { + return ErrNoState + } + + delete(s.warnings, message) + return nil } type byLastAdded []*Warning diff -Nru snapd-2.62+23.10/overlord/state/warning_test.go snapd-2.63+23.10/overlord/state/warning_test.go --- snapd-2.62+23.10/overlord/state/warning_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/overlord/state/warning_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -28,10 +28,9 @@ "gopkg.in/check.v1" "github.com/snapcore/snapd/overlord/state" + "github.com/snapcore/snapd/testutil" ) -var never time.Time - func (stateSuite) testMarshalWarning(shown bool, c *check.C) { st := state.New(nil) st.Lock() @@ -57,8 +56,8 @@ c.Assert(v, check.HasLen, 1) c.Check(v[0], check.HasLen, expectedNumKeys) c.Check(v[0]["message"], check.DeepEquals, "hello") - c.Check(v[0]["expire-after"], check.Equals, state.DefaultExpireAfter.String()) - c.Check(v[0]["repeat-after"], check.Equals, state.DefaultRepeatAfter.String()) + c.Check(v[0]["expire-after"], check.Equals, state.DefaultWarningExpireAfter.String()) + c.Check(v[0]["repeat-after"], check.Equals, state.DefaultWarningRepeatAfter.String()) c.Check(v[0]["first-added"], check.Equals, v[0]["last-added"]) t, err := time.Parse(time.RFC3339, v[0]["first-added"]) c.Assert(err, check.IsNil) @@ -105,7 +104,6 @@ {`{ "first-added": "2006-01-02T15:04:05Z", "expire-after": "1h", "repeat-after": "1h"}`, state.ErrNoWarningMessage}, {`{"message": "x", "expire-after": "1h", "repeat-after": "1h"}`, state.ErrNoWarningFirstAdded}, {`{"message": "x", "first-added": "2006-01-02T15:04:05Z", "repeat-after": "1h"}`, state.ErrNoWarningExpireAfter}, - {`{"message": "x", "first-added": "2006-01-02T15:04:05Z", "expire-after": "1h" }`, state.ErrNoWarningRepeatAfter}, } { var w state.Warning c.Check(json.Unmarshal([]byte(t.b), &w), check.Equals, t.e) @@ -135,12 +133,15 @@ func (stateSuite) TestDeleteExpired(c *check.C) { const dt = 20 * time.Millisecond - oldTime := time.Now() + oldTime := time.Now().Add(dt - state.DefaultWarningExpireAfter) st := state.New(nil) st.Lock() defer st.Unlock() st.Warnf("hello again") // adding this twice to trigger the swap in sort - st.AddWarning("hello", oldTime, never, dt, state.DefaultRepeatAfter) + st.AddWarning("hello", &state.AddWarningOptions{ + Time: oldTime, + RepeatAfter: state.DefaultWarningRepeatAfter, + }) st.Warnf("hello again") allWs := st.AllWarnings() @@ -163,11 +164,14 @@ func (stateSuite) TestOldRepeatedWarning(c *check.C) { now := time.Now() - oldTime := now.UTC().Add(-2 * state.DefaultExpireAfter) + oldTime := now.UTC().Add(-2 * state.DefaultWarningExpireAfter) st := state.New(nil) st.Lock() defer st.Unlock() - st.AddWarning("hello", oldTime, never, state.DefaultExpireAfter, state.DefaultRepeatAfter) + st.AddWarning("hello", &state.AddWarningOptions{ + Time: oldTime, + RepeatAfter: state.DefaultWarningRepeatAfter, + }) st.Warnf("hello") allWs := st.AllWarnings() @@ -199,10 +203,13 @@ st.Lock() defer st.Unlock() t0 := time.Now().Add(-100 * time.Hour) - st.AddWarning("hello", t0, never, state.DefaultExpireAfter, state.DefaultRepeatAfter) + st.AddWarning("hello", &state.AddWarningOptions{ + Time: t0, + RepeatAfter: state.DefaultWarningRepeatAfter, + }) n, t := st.WarningsSummary() c.Check(n, check.Equals, 1) - c.Check(t, check.DeepEquals, t0) + c.Check(t.Equal(t0), check.Equals, true) } func (stateSuite) TestShowAndOkay(c *check.C) { @@ -246,7 +253,7 @@ defer st.Unlock() const myRepeatAfter = 2 * time.Second t0 := 
time.Now() - st.AddWarning("hello", t0, never, state.DefaultExpireAfter, myRepeatAfter) + st.AddWarning("hello", &state.AddWarningOptions{Time: t0}) ws, t1 := st.PendingWarnings() c.Assert(ws, check.HasLen, 1) c.Check(fmt.Sprintf("%q", ws), check.Equals, `["hello"]`) @@ -254,7 +261,7 @@ n := st.OkayWarnings(t1) c.Check(n, check.Equals, 1) - st.Warnf("hello") + st.AddWarning("hello", &state.AddWarningOptions{RepeatAfter: myRepeatAfter}) ws, _ = st.PendingWarnings() c.Check(ws, check.HasLen, 0) // not enough time has passed @@ -265,3 +272,26 @@ c.Check(ws, check.HasLen, 1) c.Check(fmt.Sprintf("%q", ws), check.Equals, `["hello"]`) } + +func (stateSuite) TestRemoveWarning(c *check.C) { + st := state.New(nil) + st.Lock() + defer st.Unlock() + + // cannot replace a non existing warning + err := st.RemoveWarning("this warning does not exist") + c.Assert(err, testutil.ErrorIs, state.ErrNoState) + ws := st.AllWarnings() + c.Check(ws, check.HasLen, 0) + + st.Warnf("this warning exists") + ws = st.AllWarnings() + c.Check(ws, check.HasLen, 1) + c.Check(ws[0].String(), check.Equals, "this warning exists") + + // check warning is removed + err = st.RemoveWarning("this warning exists") + c.Assert(err, check.IsNil) + ws = st.AllWarnings() + c.Check(ws, check.HasLen, 0) +} diff -Nru snapd-2.62+23.10/packaging/amzn-2/snapd.spec snapd-2.63+23.10/packaging/amzn-2/snapd.spec --- snapd-2.62+23.10/packaging/amzn-2/snapd.spec 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/amzn-2/snapd.spec 2024-04-24 00:00:39.000000000 +0000 @@ -104,7 +104,7 @@ %endif Name: snapd -Version: 2.62 +Version: 2.63 Release: 0%{?dist} Summary: A transactional software package manager License: GPLv3 @@ -166,7 +166,7 @@ %endif %if ! 0%{?with_bundled} -BuildRequires: golang(github.com/boltdb/bolt) +BuildRequires: golang(go.etcd.io/bbolt) BuildRequires: golang(github.com/coreos/go-systemd/activation) BuildRequires: golang(github.com/godbus/dbus) BuildRequires: golang(github.com/godbus/dbus/introspect) @@ -263,7 +263,7 @@ %endif %if ! 0%{?with_bundled} -Requires: golang(github.com/boltdb/bolt) +Requires: golang(go.etcd.io/bbolt) Requires: golang(github.com/coreos/go-systemd/activation) Requires: golang(github.com/godbus/dbus) Requires: golang(github.com/godbus/dbus/introspect) @@ -292,7 +292,7 @@ # These Provides are unversioned because the sources in # the bundled tarball are unversioned (they go by git commit) # *sigh*... I hate golang... 
-Provides: bundled(golang(github.com/snapcore/bolt)) +Provides: bundled(golang(go.etcd.io/bbolt)) Provides: bundled(golang(github.com/coreos/go-systemd/activation)) Provides: bundled(golang(github.com/godbus/dbus)) Provides: bundled(golang(github.com/godbus/dbus/introspect)) @@ -1004,6 +1004,54 @@ %changelog +* Wed Apr 24 2024 Ernest Lotter +- New upstream release 2.63 + - Support for snap services to show the current status of user + services (experimental) + - Refresh app awareness: record snap-run-inhibit notice when + starting app from snap that is busy with refresh (experimental) + - Refresh app awareness: use warnings as fallback for desktop + notifications (experimental) + - Aspect based configuration: make request fields in the aspect- + bundle's rules optional (experimental) + - Aspect based configuration: make map keys conform to the same + format as path sub-keys (experimental) + - Aspect based configuration: make unset and set behaviour similar + to configuration options (experimental) + - Aspect based configuration: limit nesting level for setting value + (experimental) + - Components: use symlinks to point active snap component revisions + - Components: add model assertion support for components + - Components: fix to ensure local component installation always gets + a new revision number + - Add basic support for a CIFS remote filesystem-based home + directory + - Add support for AppArmor profile kill mode to avoid snap-confine + error + - Allow more than one interface to grant access to the same API + endpoint or notice type + - Allow all snapd service's control group processes to send systemd + notifications to prevent warnings flooding the log + - Enable not preseeded single boot install + - Update secboot to handle new sbatlevel + - Fix to not use cgroup for non-strict confined snaps (devmode, + classic) + - Fix two race conditions relating to freedesktop notifications + - Fix missing tunables in snap-update-ns AppArmor template + - Fix rejection of snapd snap udev command line by older host snap- + device-helper + - Rework seccomp allow/deny list + - Clean up files removed by gadgets + - Remove non-viable boot chains to avoid secboot failure + - posix_mq interface: add support for missing time64 mqueue syscalls + mq_timedreceive_time64 and mq_timedsend_time64 + - password-manager-service interface: allow kwalletd version 6 + - kubernetes-support interface: allow SOCK_SEQPACKET sockets + - system-observe interface: allow listing systemd units and their + properties + - opengl interface: enable use of nvidia container toolkit CDI + config generation + * Thu Mar 21 2024 Ernest Lotter - New upstream release 2.62 - Aspects based configuration schema support (experimental) diff -Nru snapd-2.62+23.10/packaging/amzn-2023/snapd.spec snapd-2.63+23.10/packaging/amzn-2023/snapd.spec --- snapd-2.62+23.10/packaging/amzn-2023/snapd.spec 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/amzn-2023/snapd.spec 2024-04-24 00:00:39.000000000 +0000 @@ -104,7 +104,7 @@ %endif Name: snapd -Version: 2.62 +Version: 2.63 Release: 0%{?dist} Summary: A transactional software package manager License: GPLv3 @@ -166,7 +166,7 @@ %endif %if ! 0%{?with_bundled} -BuildRequires: golang(github.com/boltdb/bolt) +BuildRequires: golang(go.etcd.io/bbolt) BuildRequires: golang(github.com/coreos/go-systemd/activation) BuildRequires: golang(github.com/godbus/dbus) BuildRequires: golang(github.com/godbus/dbus/introspect) @@ -263,7 +263,7 @@ %endif %if ! 
0%{?with_bundled} -Requires: golang(github.com/boltdb/bolt) +Requires: golang(go.etcd.io/bbolt) Requires: golang(github.com/coreos/go-systemd/activation) Requires: golang(github.com/godbus/dbus) Requires: golang(github.com/godbus/dbus/introspect) @@ -292,7 +292,7 @@ # These Provides are unversioned because the sources in # the bundled tarball are unversioned (they go by git commit) # *sigh*... I hate golang... -Provides: bundled(golang(github.com/snapcore/bolt)) +Provides: bundled(golang(go.etcd.io/bbolt)) Provides: bundled(golang(github.com/coreos/go-systemd/activation)) Provides: bundled(golang(github.com/godbus/dbus)) Provides: bundled(golang(github.com/godbus/dbus/introspect)) @@ -1004,6 +1004,54 @@ %changelog +* Wed Apr 24 2024 Ernest Lotter +- New upstream release 2.63 + - Support for snap services to show the current status of user + services (experimental) + - Refresh app awareness: record snap-run-inhibit notice when + starting app from snap that is busy with refresh (experimental) + - Refresh app awareness: use warnings as fallback for desktop + notifications (experimental) + - Aspect based configuration: make request fields in the aspect- + bundle's rules optional (experimental) + - Aspect based configuration: make map keys conform to the same + format as path sub-keys (experimental) + - Aspect based configuration: make unset and set behaviour similar + to configuration options (experimental) + - Aspect based configuration: limit nesting level for setting value + (experimental) + - Components: use symlinks to point active snap component revisions + - Components: add model assertion support for components + - Components: fix to ensure local component installation always gets + a new revision number + - Add basic support for a CIFS remote filesystem-based home + directory + - Add support for AppArmor profile kill mode to avoid snap-confine + error + - Allow more than one interface to grant access to the same API + endpoint or notice type + - Allow all snapd service's control group processes to send systemd + notifications to prevent warnings flooding the log + - Enable not preseeded single boot install + - Update secboot to handle new sbatlevel + - Fix to not use cgroup for non-strict confined snaps (devmode, + classic) + - Fix two race conditions relating to freedesktop notifications + - Fix missing tunables in snap-update-ns AppArmor template + - Fix rejection of snapd snap udev command line by older host snap- + device-helper + - Rework seccomp allow/deny list + - Clean up files removed by gadgets + - Remove non-viable boot chains to avoid secboot failure + - posix_mq interface: add support for missing time64 mqueue syscalls + mq_timedreceive_time64 and mq_timedsend_time64 + - password-manager-service interface: allow kwalletd version 6 + - kubernetes-support interface: allow SOCK_SEQPACKET sockets + - system-observe interface: allow listing systemd units and their + properties + - opengl interface: enable use of nvidia container toolkit CDI + config generation + * Thu Mar 21 2024 Ernest Lotter - New upstream release 2.62 - Aspects based configuration schema support (experimental) diff -Nru snapd-2.62+23.10/packaging/arch/PKGBUILD snapd-2.63+23.10/packaging/arch/PKGBUILD --- snapd-2.62+23.10/packaging/arch/PKGBUILD 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/arch/PKGBUILD 2024-04-24 00:00:39.000000000 +0000 @@ -11,7 +11,7 @@ depends=('squashfs-tools' 'libseccomp' 'libsystemd' 'apparmor') optdepends=('bash-completion: bash completion support' 
'xdg-desktop-portal: desktop integration') -pkgver=2.62 +pkgver=2.63 pkgrel=1 arch=('x86_64' 'i686' 'armv7h' 'aarch64') url="https://github.com/snapcore/snapd" diff -Nru snapd-2.62+23.10/packaging/centos-7/snapd.spec snapd-2.63+23.10/packaging/centos-7/snapd.spec --- snapd-2.62+23.10/packaging/centos-7/snapd.spec 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/centos-7/snapd.spec 2024-04-24 00:00:39.000000000 +0000 @@ -104,7 +104,7 @@ %endif Name: snapd -Version: 2.62 +Version: 2.63 Release: 0%{?dist} Summary: A transactional software package manager License: GPLv3 @@ -166,7 +166,7 @@ %endif %if ! 0%{?with_bundled} -BuildRequires: golang(github.com/boltdb/bolt) +BuildRequires: golang(go.etcd.io/bbolt) BuildRequires: golang(github.com/coreos/go-systemd/activation) BuildRequires: golang(github.com/godbus/dbus) BuildRequires: golang(github.com/godbus/dbus/introspect) @@ -263,7 +263,7 @@ %endif %if ! 0%{?with_bundled} -Requires: golang(github.com/boltdb/bolt) +Requires: golang(go.etcd.io/bbolt) Requires: golang(github.com/coreos/go-systemd/activation) Requires: golang(github.com/godbus/dbus) Requires: golang(github.com/godbus/dbus/introspect) @@ -292,7 +292,7 @@ # These Provides are unversioned because the sources in # the bundled tarball are unversioned (they go by git commit) # *sigh*... I hate golang... -Provides: bundled(golang(github.com/snapcore/bolt)) +Provides: bundled(golang(go.etcd.io/bbolt)) Provides: bundled(golang(github.com/coreos/go-systemd/activation)) Provides: bundled(golang(github.com/godbus/dbus)) Provides: bundled(golang(github.com/godbus/dbus/introspect)) @@ -1004,6 +1004,54 @@ %changelog +* Wed Apr 24 2024 Ernest Lotter +- New upstream release 2.63 + - Support for snap services to show the current status of user + services (experimental) + - Refresh app awareness: record snap-run-inhibit notice when + starting app from snap that is busy with refresh (experimental) + - Refresh app awareness: use warnings as fallback for desktop + notifications (experimental) + - Aspect based configuration: make request fields in the aspect- + bundle's rules optional (experimental) + - Aspect based configuration: make map keys conform to the same + format as path sub-keys (experimental) + - Aspect based configuration: make unset and set behaviour similar + to configuration options (experimental) + - Aspect based configuration: limit nesting level for setting value + (experimental) + - Components: use symlinks to point active snap component revisions + - Components: add model assertion support for components + - Components: fix to ensure local component installation always gets + a new revision number + - Add basic support for a CIFS remote filesystem-based home + directory + - Add support for AppArmor profile kill mode to avoid snap-confine + error + - Allow more than one interface to grant access to the same API + endpoint or notice type + - Allow all snapd service's control group processes to send systemd + notifications to prevent warnings flooding the log + - Enable not preseeded single boot install + - Update secboot to handle new sbatlevel + - Fix to not use cgroup for non-strict confined snaps (devmode, + classic) + - Fix two race conditions relating to freedesktop notifications + - Fix missing tunables in snap-update-ns AppArmor template + - Fix rejection of snapd snap udev command line by older host snap- + device-helper + - Rework seccomp allow/deny list + - Clean up files removed by gadgets + - Remove non-viable boot chains to avoid secboot failure + - 
posix_mq interface: add support for missing time64 mqueue syscalls + mq_timedreceive_time64 and mq_timedsend_time64 + - password-manager-service interface: allow kwalletd version 6 + - kubernetes-support interface: allow SOCK_SEQPACKET sockets + - system-observe interface: allow listing systemd units and their + properties + - opengl interface: enable use of nvidia container toolkit CDI + config generation + * Thu Mar 21 2024 Ernest Lotter - New upstream release 2.62 - Aspects based configuration schema support (experimental) diff -Nru snapd-2.62+23.10/packaging/centos-8/snapd.spec snapd-2.63+23.10/packaging/centos-8/snapd.spec --- snapd-2.62+23.10/packaging/centos-8/snapd.spec 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/centos-8/snapd.spec 2024-04-24 00:00:39.000000000 +0000 @@ -104,7 +104,7 @@ %endif Name: snapd -Version: 2.62 +Version: 2.63 Release: 0%{?dist} Summary: A transactional software package manager License: GPLv3 @@ -166,7 +166,7 @@ %endif %if ! 0%{?with_bundled} -BuildRequires: golang(github.com/boltdb/bolt) +BuildRequires: golang(go.etcd.io/bbolt) BuildRequires: golang(github.com/coreos/go-systemd/activation) BuildRequires: golang(github.com/godbus/dbus) BuildRequires: golang(github.com/godbus/dbus/introspect) @@ -263,7 +263,7 @@ %endif %if ! 0%{?with_bundled} -Requires: golang(github.com/boltdb/bolt) +Requires: golang(go.etcd.io/bbolt) Requires: golang(github.com/coreos/go-systemd/activation) Requires: golang(github.com/godbus/dbus) Requires: golang(github.com/godbus/dbus/introspect) @@ -292,7 +292,7 @@ # These Provides are unversioned because the sources in # the bundled tarball are unversioned (they go by git commit) # *sigh*... I hate golang... -Provides: bundled(golang(github.com/snapcore/bolt)) +Provides: bundled(golang(go.etcd.io/bbolt)) Provides: bundled(golang(github.com/coreos/go-systemd/activation)) Provides: bundled(golang(github.com/godbus/dbus)) Provides: bundled(golang(github.com/godbus/dbus/introspect)) @@ -1004,6 +1004,54 @@ %changelog +* Wed Apr 24 2024 Ernest Lotter +- New upstream release 2.63 + - Support for snap services to show the current status of user + services (experimental) + - Refresh app awareness: record snap-run-inhibit notice when + starting app from snap that is busy with refresh (experimental) + - Refresh app awareness: use warnings as fallback for desktop + notifications (experimental) + - Aspect based configuration: make request fields in the aspect- + bundle's rules optional (experimental) + - Aspect based configuration: make map keys conform to the same + format as path sub-keys (experimental) + - Aspect based configuration: make unset and set behaviour similar + to configuration options (experimental) + - Aspect based configuration: limit nesting level for setting value + (experimental) + - Components: use symlinks to point active snap component revisions + - Components: add model assertion support for components + - Components: fix to ensure local component installation always gets + a new revision number + - Add basic support for a CIFS remote filesystem-based home + directory + - Add support for AppArmor profile kill mode to avoid snap-confine + error + - Allow more than one interface to grant access to the same API + endpoint or notice type + - Allow all snapd service's control group processes to send systemd + notifications to prevent warnings flooding the log + - Enable not preseeded single boot install + - Update secboot to handle new sbatlevel + - Fix to not use cgroup for non-strict confined snaps 
(devmode, + classic) + - Fix two race conditions relating to freedesktop notifications + - Fix missing tunables in snap-update-ns AppArmor template + - Fix rejection of snapd snap udev command line by older host snap- + device-helper + - Rework seccomp allow/deny list + - Clean up files removed by gadgets + - Remove non-viable boot chains to avoid secboot failure + - posix_mq interface: add support for missing time64 mqueue syscalls + mq_timedreceive_time64 and mq_timedsend_time64 + - password-manager-service interface: allow kwalletd version 6 + - kubernetes-support interface: allow SOCK_SEQPACKET sockets + - system-observe interface: allow listing systemd units and their + properties + - opengl interface: enable use of nvidia container toolkit CDI + config generation + * Thu Mar 21 2024 Ernest Lotter - New upstream release 2.62 - Aspects based configuration schema support (experimental) diff -Nru snapd-2.62+23.10/packaging/centos-9/snapd.spec snapd-2.63+23.10/packaging/centos-9/snapd.spec --- snapd-2.62+23.10/packaging/centos-9/snapd.spec 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/centos-9/snapd.spec 2024-04-24 00:00:39.000000000 +0000 @@ -104,7 +104,7 @@ %endif Name: snapd -Version: 2.62 +Version: 2.63 Release: 0%{?dist} Summary: A transactional software package manager License: GPLv3 @@ -166,7 +166,7 @@ %endif %if ! 0%{?with_bundled} -BuildRequires: golang(github.com/boltdb/bolt) +BuildRequires: golang(go.etcd.io/bbolt) BuildRequires: golang(github.com/coreos/go-systemd/activation) BuildRequires: golang(github.com/godbus/dbus) BuildRequires: golang(github.com/godbus/dbus/introspect) @@ -263,7 +263,7 @@ %endif %if ! 0%{?with_bundled} -Requires: golang(github.com/boltdb/bolt) +Requires: golang(go.etcd.io/bbolt) Requires: golang(github.com/coreos/go-systemd/activation) Requires: golang(github.com/godbus/dbus) Requires: golang(github.com/godbus/dbus/introspect) @@ -292,7 +292,7 @@ # These Provides are unversioned because the sources in # the bundled tarball are unversioned (they go by git commit) # *sigh*... I hate golang... 
-Provides: bundled(golang(github.com/snapcore/bolt)) +Provides: bundled(golang(go.etcd.io/bbolt)) Provides: bundled(golang(github.com/coreos/go-systemd/activation)) Provides: bundled(golang(github.com/godbus/dbus)) Provides: bundled(golang(github.com/godbus/dbus/introspect)) @@ -1004,6 +1004,54 @@ %changelog +* Wed Apr 24 2024 Ernest Lotter +- New upstream release 2.63 + - Support for snap services to show the current status of user + services (experimental) + - Refresh app awareness: record snap-run-inhibit notice when + starting app from snap that is busy with refresh (experimental) + - Refresh app awareness: use warnings as fallback for desktop + notifications (experimental) + - Aspect based configuration: make request fields in the aspect- + bundle's rules optional (experimental) + - Aspect based configuration: make map keys conform to the same + format as path sub-keys (experimental) + - Aspect based configuration: make unset and set behaviour similar + to configuration options (experimental) + - Aspect based configuration: limit nesting level for setting value + (experimental) + - Components: use symlinks to point active snap component revisions + - Components: add model assertion support for components + - Components: fix to ensure local component installation always gets + a new revision number + - Add basic support for a CIFS remote filesystem-based home + directory + - Add support for AppArmor profile kill mode to avoid snap-confine + error + - Allow more than one interface to grant access to the same API + endpoint or notice type + - Allow all snapd service's control group processes to send systemd + notifications to prevent warnings flooding the log + - Enable not preseeded single boot install + - Update secboot to handle new sbatlevel + - Fix to not use cgroup for non-strict confined snaps (devmode, + classic) + - Fix two race conditions relating to freedesktop notifications + - Fix missing tunables in snap-update-ns AppArmor template + - Fix rejection of snapd snap udev command line by older host snap- + device-helper + - Rework seccomp allow/deny list + - Clean up files removed by gadgets + - Remove non-viable boot chains to avoid secboot failure + - posix_mq interface: add support for missing time64 mqueue syscalls + mq_timedreceive_time64 and mq_timedsend_time64 + - password-manager-service interface: allow kwalletd version 6 + - kubernetes-support interface: allow SOCK_SEQPACKET sockets + - system-observe interface: allow listing systemd units and their + properties + - opengl interface: enable use of nvidia container toolkit CDI + config generation + * Thu Mar 21 2024 Ernest Lotter - New upstream release 2.62 - Aspects based configuration schema support (experimental) diff -Nru snapd-2.62+23.10/packaging/debian-sid/changelog snapd-2.63+23.10/packaging/debian-sid/changelog --- snapd-2.62+23.10/packaging/debian-sid/changelog 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/debian-sid/changelog 2024-04-24 00:00:39.000000000 +0000 @@ -1,3 +1,54 @@ +snapd (2.63-1) unstable; urgency=medium + + * New upstream release, LP: #2061179 + - Support for snap services to show the current status of user + services (experimental) + - Refresh app awareness: record snap-run-inhibit notice when + starting app from snap that is busy with refresh (experimental) + - Refresh app awareness: use warnings as fallback for desktop + notifications (experimental) + - Aspect based configuration: make request fields in the aspect- + bundle's rules optional (experimental) + - Aspect 
based configuration: make map keys conform to the same + format as path sub-keys (experimental) + - Aspect based configuration: make unset and set behaviour similar + to configuration options (experimental) + - Aspect based configuration: limit nesting level for setting value + (experimental) + - Components: use symlinks to point active snap component revisions + - Components: add model assertion support for components + - Components: fix to ensure local component installation always gets + a new revision number + - Add basic support for a CIFS remote filesystem-based home + directory + - Add support for AppArmor profile kill mode to avoid snap-confine + error + - Allow more than one interface to grant access to the same API + endpoint or notice type + - Allow all snapd service's control group processes to send systemd + notifications to prevent warnings flooding the log + - Enable not preseeded single boot install + - Update secboot to handle new sbatlevel + - Fix to not use cgroup for non-strict confined snaps (devmode, + classic) + - Fix two race conditions relating to freedesktop notifications + - Fix missing tunables in snap-update-ns AppArmor template + - Fix rejection of snapd snap udev command line by older host snap- + device-helper + - Rework seccomp allow/deny list + - Clean up files removed by gadgets + - Remove non-viable boot chains to avoid secboot failure + - posix_mq interface: add support for missing time64 mqueue syscalls + mq_timedreceive_time64 and mq_timedsend_time64 + - password-manager-service interface: allow kwalletd version 6 + - kubernetes-support interface: allow SOCK_SEQPACKET sockets + - system-observe interface: allow listing systemd units and their + properties + - opengl interface: enable use of nvidia container toolkit CDI + config generation + + -- Ernest Lotter Wed, 24 Apr 2024 02:00:39 +0200 + snapd (2.62-1) unstable; urgency=medium * New upstream release, LP: #2058277 diff -Nru snapd-2.62+23.10/packaging/debian-sid/control snapd-2.63+23.10/packaging/debian-sid/control --- snapd-2.62+23.10/packaging/debian-sid/control 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/debian-sid/control 2024-04-24 00:00:39.000000000 +0000 @@ -22,6 +22,7 @@ gettext, gnupg2, golang-dbus-dev, + golang-github-coreos-bbolt-dev, golang-github-coreos-go-systemd-dev, golang-github-gorilla-mux-dev, golang-github-jessevdk-go-flags-dev, @@ -80,6 +81,7 @@ ca-certificates, default-dbus-session-bus | dbus-session-bus, gnupg1 | gnupg, + kmod, openssh-client, squashfs-tools, systemd, diff -Nru snapd-2.62+23.10/packaging/debian-sid/rules snapd-2.63+23.10/packaging/debian-sid/rules --- snapd-2.62+23.10/packaging/debian-sid/rules 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/debian-sid/rules 2024-04-24 00:00:39.000000000 +0000 @@ -65,8 +65,8 @@ # check if we need to include the testkeys in the binary # TAGS are the go build tags for all binaries, SNAP_TAGS are for snap # build only. -TAGS=nosecboot nobolt -SNAP_TAGS=nosecboot nomanagers nobolt +TAGS=nosecboot +SNAP_TAGS=nosecboot nomanagers ifneq (,$(filter testkeys,$(DEB_BUILD_OPTIONS))) TAGS+= withtestkeys SNAP_TAGS+= withtestkeys @@ -130,14 +130,6 @@ $(MAKE) -C cmd distclean || true override_dh_auto_build: - # Drop the go.mod entry of github.com/snapcore/bolt and - # advisor/backend_bolt.go as it is not packaged in Debian. - # The program dh_golang uses "go list" to enumerate packages but this code - # does not support build tags (go list does support this but the feature is - # unused). 
The only way to avoid the problem of importing the package we - # don't want to import, is to remove the file altogether. - rm _build/src/$(DH_GOPKG)/advisor/backend_bolt.go - sed -i -e '/\tgithub.com\/snapcore\/bolt/d' _build/src/$(DH_GOPKG)/go.mod # usually done via `go generate` but that is not supported on powerpc GO_GENERATE_BUILDDIR=_build/src/$(DH_GOPKG) GO111MODULE=off GOPATH=$$(pwd)/_build ./mkversion.sh # Build golang bits diff -Nru snapd-2.62+23.10/packaging/fedora/snapd.spec snapd-2.63+23.10/packaging/fedora/snapd.spec --- snapd-2.62+23.10/packaging/fedora/snapd.spec 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/fedora/snapd.spec 2024-04-24 00:00:39.000000000 +0000 @@ -104,7 +104,7 @@ %endif Name: snapd -Version: 2.62 +Version: 2.63 Release: 0%{?dist} Summary: A transactional software package manager License: GPLv3 @@ -166,7 +166,7 @@ %endif %if ! 0%{?with_bundled} -BuildRequires: golang(github.com/boltdb/bolt) +BuildRequires: golang(go.etcd.io/bbolt) BuildRequires: golang(github.com/coreos/go-systemd/activation) BuildRequires: golang(github.com/godbus/dbus) BuildRequires: golang(github.com/godbus/dbus/introspect) @@ -263,7 +263,7 @@ %endif %if ! 0%{?with_bundled} -Requires: golang(github.com/boltdb/bolt) +Requires: golang(go.etcd.io/bbolt) Requires: golang(github.com/coreos/go-systemd/activation) Requires: golang(github.com/godbus/dbus) Requires: golang(github.com/godbus/dbus/introspect) @@ -292,7 +292,7 @@ # These Provides are unversioned because the sources in # the bundled tarball are unversioned (they go by git commit) # *sigh*... I hate golang... -Provides: bundled(golang(github.com/snapcore/bolt)) +Provides: bundled(golang(go.etcd.io/bbolt)) Provides: bundled(golang(github.com/coreos/go-systemd/activation)) Provides: bundled(golang(github.com/godbus/dbus)) Provides: bundled(golang(github.com/godbus/dbus/introspect)) @@ -1004,6 +1004,54 @@ %changelog +* Wed Apr 24 2024 Ernest Lotter +- New upstream release 2.63 + - Support for snap services to show the current status of user + services (experimental) + - Refresh app awareness: record snap-run-inhibit notice when + starting app from snap that is busy with refresh (experimental) + - Refresh app awareness: use warnings as fallback for desktop + notifications (experimental) + - Aspect based configuration: make request fields in the aspect- + bundle's rules optional (experimental) + - Aspect based configuration: make map keys conform to the same + format as path sub-keys (experimental) + - Aspect based configuration: make unset and set behaviour similar + to configuration options (experimental) + - Aspect based configuration: limit nesting level for setting value + (experimental) + - Components: use symlinks to point active snap component revisions + - Components: add model assertion support for components + - Components: fix to ensure local component installation always gets + a new revision number + - Add basic support for a CIFS remote filesystem-based home + directory + - Add support for AppArmor profile kill mode to avoid snap-confine + error + - Allow more than one interface to grant access to the same API + endpoint or notice type + - Allow all snapd service's control group processes to send systemd + notifications to prevent warnings flooding the log + - Enable not preseeded single boot install + - Update secboot to handle new sbatlevel + - Fix to not use cgroup for non-strict confined snaps (devmode, + classic) + - Fix two race conditions relating to freedesktop notifications + - Fix missing 
tunables in snap-update-ns AppArmor template + - Fix rejection of snapd snap udev command line by older host snap- + device-helper + - Rework seccomp allow/deny list + - Clean up files removed by gadgets + - Remove non-viable boot chains to avoid secboot failure + - posix_mq interface: add support for missing time64 mqueue syscalls + mq_timedreceive_time64 and mq_timedsend_time64 + - password-manager-service interface: allow kwalletd version 6 + - kubernetes-support interface: allow SOCK_SEQPACKET sockets + - system-observe interface: allow listing systemd units and their + properties + - opengl interface: enable use of nvidia container toolkit CDI + config generation + * Thu Mar 21 2024 Ernest Lotter - New upstream release 2.62 - Aspects based configuration schema support (experimental) diff -Nru snapd-2.62+23.10/packaging/fedora-38/snapd.spec snapd-2.63+23.10/packaging/fedora-38/snapd.spec --- snapd-2.62+23.10/packaging/fedora-38/snapd.spec 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/fedora-38/snapd.spec 2024-04-24 00:00:39.000000000 +0000 @@ -104,7 +104,7 @@ %endif Name: snapd -Version: 2.62 +Version: 2.63 Release: 0%{?dist} Summary: A transactional software package manager License: GPLv3 @@ -166,7 +166,7 @@ %endif %if ! 0%{?with_bundled} -BuildRequires: golang(github.com/boltdb/bolt) +BuildRequires: golang(go.etcd.io/bbolt) BuildRequires: golang(github.com/coreos/go-systemd/activation) BuildRequires: golang(github.com/godbus/dbus) BuildRequires: golang(github.com/godbus/dbus/introspect) @@ -263,7 +263,7 @@ %endif %if ! 0%{?with_bundled} -Requires: golang(github.com/boltdb/bolt) +Requires: golang(go.etcd.io/bbolt) Requires: golang(github.com/coreos/go-systemd/activation) Requires: golang(github.com/godbus/dbus) Requires: golang(github.com/godbus/dbus/introspect) @@ -292,7 +292,7 @@ # These Provides are unversioned because the sources in # the bundled tarball are unversioned (they go by git commit) # *sigh*... I hate golang... 
-Provides: bundled(golang(github.com/snapcore/bolt)) +Provides: bundled(golang(go.etcd.io/bbolt)) Provides: bundled(golang(github.com/coreos/go-systemd/activation)) Provides: bundled(golang(github.com/godbus/dbus)) Provides: bundled(golang(github.com/godbus/dbus/introspect)) @@ -1004,6 +1004,54 @@ %changelog +* Wed Apr 24 2024 Ernest Lotter +- New upstream release 2.63 + - Support for snap services to show the current status of user + services (experimental) + - Refresh app awareness: record snap-run-inhibit notice when + starting app from snap that is busy with refresh (experimental) + - Refresh app awareness: use warnings as fallback for desktop + notifications (experimental) + - Aspect based configuration: make request fields in the aspect- + bundle's rules optional (experimental) + - Aspect based configuration: make map keys conform to the same + format as path sub-keys (experimental) + - Aspect based configuration: make unset and set behaviour similar + to configuration options (experimental) + - Aspect based configuration: limit nesting level for setting value + (experimental) + - Components: use symlinks to point active snap component revisions + - Components: add model assertion support for components + - Components: fix to ensure local component installation always gets + a new revision number + - Add basic support for a CIFS remote filesystem-based home + directory + - Add support for AppArmor profile kill mode to avoid snap-confine + error + - Allow more than one interface to grant access to the same API + endpoint or notice type + - Allow all snapd service's control group processes to send systemd + notifications to prevent warnings flooding the log + - Enable not preseeded single boot install + - Update secboot to handle new sbatlevel + - Fix to not use cgroup for non-strict confined snaps (devmode, + classic) + - Fix two race conditions relating to freedesktop notifications + - Fix missing tunables in snap-update-ns AppArmor template + - Fix rejection of snapd snap udev command line by older host snap- + device-helper + - Rework seccomp allow/deny list + - Clean up files removed by gadgets + - Remove non-viable boot chains to avoid secboot failure + - posix_mq interface: add support for missing time64 mqueue syscalls + mq_timedreceive_time64 and mq_timedsend_time64 + - password-manager-service interface: allow kwalletd version 6 + - kubernetes-support interface: allow SOCK_SEQPACKET sockets + - system-observe interface: allow listing systemd units and their + properties + - opengl interface: enable use of nvidia container toolkit CDI + config generation + * Thu Mar 21 2024 Ernest Lotter - New upstream release 2.62 - Aspects based configuration schema support (experimental) diff -Nru snapd-2.62+23.10/packaging/fedora-39/snapd.spec snapd-2.63+23.10/packaging/fedora-39/snapd.spec --- snapd-2.62+23.10/packaging/fedora-39/snapd.spec 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/fedora-39/snapd.spec 2024-04-24 00:00:39.000000000 +0000 @@ -104,7 +104,7 @@ %endif Name: snapd -Version: 2.62 +Version: 2.63 Release: 0%{?dist} Summary: A transactional software package manager License: GPLv3 @@ -166,7 +166,7 @@ %endif %if ! 0%{?with_bundled} -BuildRequires: golang(github.com/boltdb/bolt) +BuildRequires: golang(go.etcd.io/bbolt) BuildRequires: golang(github.com/coreos/go-systemd/activation) BuildRequires: golang(github.com/godbus/dbus) BuildRequires: golang(github.com/godbus/dbus/introspect) @@ -263,7 +263,7 @@ %endif %if ! 
0%{?with_bundled} -Requires: golang(github.com/boltdb/bolt) +Requires: golang(go.etcd.io/bbolt) Requires: golang(github.com/coreos/go-systemd/activation) Requires: golang(github.com/godbus/dbus) Requires: golang(github.com/godbus/dbus/introspect) @@ -292,7 +292,7 @@ # These Provides are unversioned because the sources in # the bundled tarball are unversioned (they go by git commit) # *sigh*... I hate golang... -Provides: bundled(golang(github.com/snapcore/bolt)) +Provides: bundled(golang(go.etcd.io/bbolt)) Provides: bundled(golang(github.com/coreos/go-systemd/activation)) Provides: bundled(golang(github.com/godbus/dbus)) Provides: bundled(golang(github.com/godbus/dbus/introspect)) @@ -1004,6 +1004,54 @@ %changelog +* Wed Apr 24 2024 Ernest Lotter +- New upstream release 2.63 + - Support for snap services to show the current status of user + services (experimental) + - Refresh app awareness: record snap-run-inhibit notice when + starting app from snap that is busy with refresh (experimental) + - Refresh app awareness: use warnings as fallback for desktop + notifications (experimental) + - Aspect based configuration: make request fields in the aspect- + bundle's rules optional (experimental) + - Aspect based configuration: make map keys conform to the same + format as path sub-keys (experimental) + - Aspect based configuration: make unset and set behaviour similar + to configuration options (experimental) + - Aspect based configuration: limit nesting level for setting value + (experimental) + - Components: use symlinks to point active snap component revisions + - Components: add model assertion support for components + - Components: fix to ensure local component installation always gets + a new revision number + - Add basic support for a CIFS remote filesystem-based home + directory + - Add support for AppArmor profile kill mode to avoid snap-confine + error + - Allow more than one interface to grant access to the same API + endpoint or notice type + - Allow all snapd service's control group processes to send systemd + notifications to prevent warnings flooding the log + - Enable not preseeded single boot install + - Update secboot to handle new sbatlevel + - Fix to not use cgroup for non-strict confined snaps (devmode, + classic) + - Fix two race conditions relating to freedesktop notifications + - Fix missing tunables in snap-update-ns AppArmor template + - Fix rejection of snapd snap udev command line by older host snap- + device-helper + - Rework seccomp allow/deny list + - Clean up files removed by gadgets + - Remove non-viable boot chains to avoid secboot failure + - posix_mq interface: add support for missing time64 mqueue syscalls + mq_timedreceive_time64 and mq_timedsend_time64 + - password-manager-service interface: allow kwalletd version 6 + - kubernetes-support interface: allow SOCK_SEQPACKET sockets + - system-observe interface: allow listing systemd units and their + properties + - opengl interface: enable use of nvidia container toolkit CDI + config generation + * Thu Mar 21 2024 Ernest Lotter - New upstream release 2.62 - Aspects based configuration schema support (experimental) diff -Nru snapd-2.62+23.10/packaging/fedora-rawhide/snapd.spec snapd-2.63+23.10/packaging/fedora-rawhide/snapd.spec --- snapd-2.62+23.10/packaging/fedora-rawhide/snapd.spec 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/fedora-rawhide/snapd.spec 2024-04-24 00:00:39.000000000 +0000 @@ -104,7 +104,7 @@ %endif Name: snapd -Version: 2.62 +Version: 2.63 Release: 0%{?dist} Summary: A 
transactional software package manager License: GPLv3 @@ -166,7 +166,7 @@ %endif %if ! 0%{?with_bundled} -BuildRequires: golang(github.com/boltdb/bolt) +BuildRequires: golang(go.etcd.io/bbolt) BuildRequires: golang(github.com/coreos/go-systemd/activation) BuildRequires: golang(github.com/godbus/dbus) BuildRequires: golang(github.com/godbus/dbus/introspect) @@ -263,7 +263,7 @@ %endif %if ! 0%{?with_bundled} -Requires: golang(github.com/boltdb/bolt) +Requires: golang(go.etcd.io/bbolt) Requires: golang(github.com/coreos/go-systemd/activation) Requires: golang(github.com/godbus/dbus) Requires: golang(github.com/godbus/dbus/introspect) @@ -292,7 +292,7 @@ # These Provides are unversioned because the sources in # the bundled tarball are unversioned (they go by git commit) # *sigh*... I hate golang... -Provides: bundled(golang(github.com/snapcore/bolt)) +Provides: bundled(golang(go.etcd.io/bbolt)) Provides: bundled(golang(github.com/coreos/go-systemd/activation)) Provides: bundled(golang(github.com/godbus/dbus)) Provides: bundled(golang(github.com/godbus/dbus/introspect)) @@ -1004,6 +1004,54 @@ %changelog +* Wed Apr 24 2024 Ernest Lotter +- New upstream release 2.63 + - Support for snap services to show the current status of user + services (experimental) + - Refresh app awareness: record snap-run-inhibit notice when + starting app from snap that is busy with refresh (experimental) + - Refresh app awareness: use warnings as fallback for desktop + notifications (experimental) + - Aspect based configuration: make request fields in the aspect- + bundle's rules optional (experimental) + - Aspect based configuration: make map keys conform to the same + format as path sub-keys (experimental) + - Aspect based configuration: make unset and set behaviour similar + to configuration options (experimental) + - Aspect based configuration: limit nesting level for setting value + (experimental) + - Components: use symlinks to point active snap component revisions + - Components: add model assertion support for components + - Components: fix to ensure local component installation always gets + a new revision number + - Add basic support for a CIFS remote filesystem-based home + directory + - Add support for AppArmor profile kill mode to avoid snap-confine + error + - Allow more than one interface to grant access to the same API + endpoint or notice type + - Allow all snapd service's control group processes to send systemd + notifications to prevent warnings flooding the log + - Enable not preseeded single boot install + - Update secboot to handle new sbatlevel + - Fix to not use cgroup for non-strict confined snaps (devmode, + classic) + - Fix two race conditions relating to freedesktop notifications + - Fix missing tunables in snap-update-ns AppArmor template + - Fix rejection of snapd snap udev command line by older host snap- + device-helper + - Rework seccomp allow/deny list + - Clean up files removed by gadgets + - Remove non-viable boot chains to avoid secboot failure + - posix_mq interface: add support for missing time64 mqueue syscalls + mq_timedreceive_time64 and mq_timedsend_time64 + - password-manager-service interface: allow kwalletd version 6 + - kubernetes-support interface: allow SOCK_SEQPACKET sockets + - system-observe interface: allow listing systemd units and their + properties + - opengl interface: enable use of nvidia container toolkit CDI + config generation + * Thu Mar 21 2024 Ernest Lotter - New upstream release 2.62 - Aspects based configuration schema support (experimental) diff -Nru 
snapd-2.62+23.10/packaging/opensuse/snapd.changes snapd-2.63+23.10/packaging/opensuse/snapd.changes --- snapd-2.62+23.10/packaging/opensuse/snapd.changes 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/opensuse/snapd.changes 2024-04-24 00:00:39.000000000 +0000 @@ -1,4 +1,9 @@ ------------------------------------------------------------------- +Wed Apr 24 00:00:39 UTC 2024 - ernest.lotter@canonical.com + +- Update to upstream release 2.63 + +------------------------------------------------------------------- Thu Mar 21 20:06:10 UTC 2024 - ernest.lotter@canonical.com - Update to upstream release 2.62 diff -Nru snapd-2.62+23.10/packaging/opensuse/snapd.spec snapd-2.63+23.10/packaging/opensuse/snapd.spec --- snapd-2.62+23.10/packaging/opensuse/snapd.spec 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/opensuse/snapd.spec 2024-04-24 00:00:39.000000000 +0000 @@ -82,7 +82,7 @@ Name: snapd -Version: 2.62 +Version: 2.63 Release: 0 Summary: Tools enabling systems to work with .snap files License: GPL-3.0 diff -Nru snapd-2.62+23.10/packaging/opensuse-15.5/snapd.changes snapd-2.63+23.10/packaging/opensuse-15.5/snapd.changes --- snapd-2.62+23.10/packaging/opensuse-15.5/snapd.changes 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/opensuse-15.5/snapd.changes 2024-04-24 00:00:39.000000000 +0000 @@ -1,4 +1,9 @@ ------------------------------------------------------------------- +Wed Apr 24 00:00:39 UTC 2024 - ernest.lotter@canonical.com + +- Update to upstream release 2.63 + +------------------------------------------------------------------- Thu Mar 21 20:06:10 UTC 2024 - ernest.lotter@canonical.com - Update to upstream release 2.62 diff -Nru snapd-2.62+23.10/packaging/opensuse-15.5/snapd.spec snapd-2.63+23.10/packaging/opensuse-15.5/snapd.spec --- snapd-2.62+23.10/packaging/opensuse-15.5/snapd.spec 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/opensuse-15.5/snapd.spec 2024-04-24 00:00:39.000000000 +0000 @@ -82,7 +82,7 @@ Name: snapd -Version: 2.62 +Version: 2.63 Release: 0 Summary: Tools enabling systems to work with .snap files License: GPL-3.0 diff -Nru snapd-2.62+23.10/packaging/opensuse-tumbleweed/snapd.changes snapd-2.63+23.10/packaging/opensuse-tumbleweed/snapd.changes --- snapd-2.62+23.10/packaging/opensuse-tumbleweed/snapd.changes 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/opensuse-tumbleweed/snapd.changes 2024-04-24 00:00:39.000000000 +0000 @@ -1,4 +1,9 @@ ------------------------------------------------------------------- +Wed Apr 24 00:00:39 UTC 2024 - ernest.lotter@canonical.com + +- Update to upstream release 2.63 + +------------------------------------------------------------------- Thu Mar 21 20:06:10 UTC 2024 - ernest.lotter@canonical.com - Update to upstream release 2.62 diff -Nru snapd-2.62+23.10/packaging/opensuse-tumbleweed/snapd.spec snapd-2.63+23.10/packaging/opensuse-tumbleweed/snapd.spec --- snapd-2.62+23.10/packaging/opensuse-tumbleweed/snapd.spec 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/opensuse-tumbleweed/snapd.spec 2024-04-24 00:00:39.000000000 +0000 @@ -82,7 +82,7 @@ Name: snapd -Version: 2.62 +Version: 2.63 Release: 0 Summary: Tools enabling systems to work with .snap files License: GPL-3.0 diff -Nru snapd-2.62+23.10/packaging/ubuntu-14.04/changelog snapd-2.63+23.10/packaging/ubuntu-14.04/changelog --- snapd-2.62+23.10/packaging/ubuntu-14.04/changelog 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/ubuntu-14.04/changelog 
2024-04-24 00:00:39.000000000 +0000 @@ -1,3 +1,54 @@ +snapd (2.63~14.04) trusty; urgency=medium + + * New upstream release, LP: #2061179 + - Support for snap services to show the current status of user + services (experimental) + - Refresh app awareness: record snap-run-inhibit notice when + starting app from snap that is busy with refresh (experimental) + - Refresh app awareness: use warnings as fallback for desktop + notifications (experimental) + - Aspect based configuration: make request fields in the aspect- + bundle's rules optional (experimental) + - Aspect based configuration: make map keys conform to the same + format as path sub-keys (experimental) + - Aspect based configuration: make unset and set behaviour similar + to configuration options (experimental) + - Aspect based configuration: limit nesting level for setting value + (experimental) + - Components: use symlinks to point active snap component revisions + - Components: add model assertion support for components + - Components: fix to ensure local component installation always gets + a new revision number + - Add basic support for a CIFS remote filesystem-based home + directory + - Add support for AppArmor profile kill mode to avoid snap-confine + error + - Allow more than one interface to grant access to the same API + endpoint or notice type + - Allow all snapd service's control group processes to send systemd + notifications to prevent warnings flooding the log + - Enable not preseeded single boot install + - Update secboot to handle new sbatlevel + - Fix to not use cgroup for non-strict confined snaps (devmode, + classic) + - Fix two race conditions relating to freedesktop notifications + - Fix missing tunables in snap-update-ns AppArmor template + - Fix rejection of snapd snap udev command line by older host snap- + device-helper + - Rework seccomp allow/deny list + - Clean up files removed by gadgets + - Remove non-viable boot chains to avoid secboot failure + - posix_mq interface: add support for missing time64 mqueue syscalls + mq_timedreceive_time64 and mq_timedsend_time64 + - password-manager-service interface: allow kwalletd version 6 + - kubernetes-support interface: allow SOCK_SEQPACKET sockets + - system-observe interface: allow listing systemd units and their + properties + - opengl interface: enable use of nvidia container toolkit CDI + config generation + + -- Ernest Lotter Wed, 24 Apr 2024 02:00:39 +0200 + snapd (2.62~14.04) trusty; urgency=medium * New upstream release, LP: #2058277 diff -Nru snapd-2.62+23.10/packaging/ubuntu-16.04/changelog snapd-2.63+23.10/packaging/ubuntu-16.04/changelog --- snapd-2.62+23.10/packaging/ubuntu-16.04/changelog 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/packaging/ubuntu-16.04/changelog 2024-04-24 00:00:39.000000000 +0000 @@ -1,4 +1,55 @@ -snapd (2.62+23.10) mantic; urgency=medium +snapd (2.63+23.10) mantic; urgency=medium + + * New upstream release, LP: #2061179 + - Support for snap services to show the current status of user + services (experimental) + - Refresh app awareness: record snap-run-inhibit notice when + starting app from snap that is busy with refresh (experimental) + - Refresh app awareness: use warnings as fallback for desktop + notifications (experimental) + - Aspect based configuration: make request fields in the aspect- + bundle's rules optional (experimental) + - Aspect based configuration: make map keys conform to the same + format as path sub-keys (experimental) + - Aspect based configuration: make unset and set behaviour similar + to 
configuration options (experimental) + - Aspect based configuration: limit nesting level for setting value + (experimental) + - Components: use symlinks to point active snap component revisions + - Components: add model assertion support for components + - Components: fix to ensure local component installation always gets + a new revision number + - Add basic support for a CIFS remote filesystem-based home + directory + - Add support for AppArmor profile kill mode to avoid snap-confine + error + - Allow more than one interface to grant access to the same API + endpoint or notice type + - Allow all snapd service's control group processes to send systemd + notifications to prevent warnings flooding the log + - Enable not preseeded single boot install + - Update secboot to handle new sbatlevel + - Fix to not use cgroup for non-strict confined snaps (devmode, + classic) + - Fix two race conditions relating to freedesktop notifications + - Fix missing tunables in snap-update-ns AppArmor template + - Fix rejection of snapd snap udev command line by older host snap- + device-helper + - Rework seccomp allow/deny list + - Clean up files removed by gadgets + - Remove non-viable boot chains to avoid secboot failure + - posix_mq interface: add support for missing time64 mqueue syscalls + mq_timedreceive_time64 and mq_timedsend_time64 + - password-manager-service interface: allow kwalletd version 6 + - kubernetes-support interface: allow SOCK_SEQPACKET sockets + - system-observe interface: allow listing systemd units and their + properties + - opengl interface: enable use of nvidia container toolkit CDI + config generation + + -- Ernest Lotter Wed, 24 Apr 2024 02:00:39 +0200 + +snapd (2.62) xenial; urgency=medium * New upstream release, LP: #2058277 - Aspects based configuration schema support (experimental) diff -Nru snapd-2.62+23.10/polkit/pid_start_time.go snapd-2.63+23.10/polkit/pid_start_time.go --- snapd-2.62+23.10/polkit/pid_start_time.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/polkit/pid_start_time.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "fmt" - "io/ioutil" + "os" "strconv" "strings" ) @@ -39,7 +39,7 @@ // // https://cgit.freedesktop.org/polkit/tree/src/polkit/polkitunixprocess.c func getStartTimeForProcStatFile(filename string) (uint64, error) { - data, err := ioutil.ReadFile(filename) + data, err := os.ReadFile(filename) if err != nil { return 0, err } diff -Nru snapd-2.62+23.10/randutil/crypto.go snapd-2.63+23.10/randutil/crypto.go --- snapd-2.62+23.10/randutil/crypto.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/randutil/crypto.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,7 @@ cryptorand "crypto/rand" "encoding/base64" "fmt" - "io/ioutil" + "os" "strings" ) @@ -56,7 +56,7 @@ // /proc/sys/kernel/random/uuid. Only to be used in very specific uses, most // random code should use CryptoToken(Bytes) instead. 
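The polkit and randutil hunks above, and several hunks below (run-checks, sandbox/apparmor), migrate callers from the deprecated io/ioutil package to the Go 1.16+ equivalents in os, and run-checks starts rejecting new io/ioutil usages. A minimal sketch of the replacements used in this diff, with illustrative paths only:

    package main

    import (
        "fmt"
        "log"
        "os"
    )

    func main() {
        // os.ReadFile replaces ioutil.ReadFile (same signature).
        data, err := os.ReadFile("/proc/sys/kernel/random/uuid")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Print(string(data))

        // os.ReadDir replaces ioutil.ReadDir, but returns []os.DirEntry
        // instead of []os.FileInfo; call Info() when the file mode or
        // size is actually needed.
        entries, err := os.ReadDir("/tmp")
        if err != nil {
            log.Fatal(err)
        }
        for _, e := range entries {
            info, err := e.Info()
            if err != nil {
                continue
            }
            fmt.Printf("%s %v\n", e.Name(), info.Mode().Perm())
        }

        // os.CreateTemp replaces ioutil.TempFile.
        f, err := os.CreateTemp("", "example_")
        if err != nil {
            log.Fatal(err)
        }
        defer os.Remove(f.Name())
        defer f.Close()
    }

Note that os.ReadDir returns directory entries rather than FileInfo values, which is why the apparmor profile tests below now call Info() before checking the file mode.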
func RandomKernelUUID() (string, error) { - b, err := ioutil.ReadFile(kernelUUIDPath) + b, err := os.ReadFile(kernelUUIDPath) if err != nil { return "", fmt.Errorf("cannot read kernel generated uuid: %w", err) } diff -Nru snapd-2.62+23.10/release/release_test.go snapd-2.63+23.10/release/release_test.go --- snapd-2.62+23.10/release/release_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/release/release_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "fmt" - "io/ioutil" "os" "path/filepath" "testing" @@ -69,7 +68,7 @@ // MockFilesystemRootType changes relase.ProcMountsPath so that it points to a temp file // generated to contain the provided filesystem type func MockFilesystemRootType(c *C, fsType string) (restorer func()) { - tmpfile, err := ioutil.TempFile(c.MkDir(), "proc_mounts_mock_") + tmpfile, err := os.CreateTemp(c.MkDir(), "proc_mounts_mock_") c.Assert(err, IsNil) // Sample contents of /proc/mounts. The second line is the one that matters. diff -Nru snapd-2.62+23.10/run-checks snapd-2.63+23.10/run-checks --- snapd-2.62+23.10/run-checks 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/run-checks 2024-04-24 00:00:39.000000000 +0000 @@ -207,6 +207,22 @@ exit 1 fi + echo "Checking for usages of deprecated io/ioutil" + got="" + for dir in $(go list -f '{{.Dir}}' ./... | grep -v '/vendor/' ); do + # shellcheck disable=SC2063 + s="$(grep -nP io/ioutil "$dir"/*.go || true)" + if [ -n "$s" ]; then + got="$s\\n$got" + fi + done + + if [ -n "$got" ]; then + echo 'Found usages of deprecated io/ioutil, please use "io" or "os" equivalents' + echo "$got" + exit 1 + fi + if command -v shellcheck >/dev/null; then exclude_tools_path=tests/lib/external/snapd-testing-tools echo "Checking shell scripts..." diff -Nru snapd-2.62+23.10/sandbox/apparmor/apparmor.go snapd-2.63+23.10/sandbox/apparmor/apparmor.go --- snapd-2.62+23.10/sandbox/apparmor/apparmor.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/sandbox/apparmor/apparmor.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "bufio" "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -346,8 +345,8 @@ } func probeKernelFeatures() ([]string, error) { - // note that ioutil.ReadDir() is already sorted - dentries, err := ioutil.ReadDir(filepath.Join(rootPath, featuresSysPath)) + // note that os.ReadDir() is already sorted + dentries, err := os.ReadDir(filepath.Join(rootPath, featuresSysPath)) if err != nil { return []string{}, err } @@ -356,7 +355,7 @@ if fi.IsDir() { features = append(features, fi.Name()) // also read any sub-features - subdenties, err := ioutil.ReadDir(filepath.Join(rootPath, featuresSysPath, fi.Name())) + subdenties, err := os.ReadDir(filepath.Join(rootPath, featuresSysPath, fi.Name())) if err != nil { return []string{}, err } diff -Nru snapd-2.62+23.10/sandbox/apparmor/apparmor_test.go snapd-2.63+23.10/sandbox/apparmor/apparmor_test.go --- snapd-2.62+23.10/sandbox/apparmor/apparmor_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/sandbox/apparmor/apparmor_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "errors" "fmt" "io" - "io/ioutil" "math" "os" "path/filepath" @@ -308,7 +307,7 @@ } c.Check(mockParserCmd.Calls(), DeepEquals, expectedCalls) - data, err := ioutil.ReadFile(filepath.Join(d, "stdin")) + data, err := os.ReadFile(filepath.Join(d, "stdin")) c.Assert(err, IsNil) c.Check(string(data), Equals, `profile snap-test { change_profile unsafe /**, @@ -493,7 +492,7 @@ err := apparmor.UpdateHomedirsTunable([]string{"/home/a", 
"/dir2"}) c.Assert(err, IsNil) configFile := filepath.Join(dirs.GlobalRootDir, "/etc/apparmor.d/tunables/home.d/snapd") - fileContents, err := ioutil.ReadFile(configFile) + fileContents, err := os.ReadFile(configFile) c.Assert(err, IsNil) c.Check(string(fileContents), Equals, `# Generated by snapd -- DO NOT EDIT!`+"\n"+`@{HOMEDIRS}+="/home/a" "/dir2"`+"\n") diff -Nru snapd-2.62+23.10/sandbox/apparmor/process.go snapd-2.63+23.10/sandbox/apparmor/process.go --- snapd-2.62+23.10/sandbox/apparmor/process.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/sandbox/apparmor/process.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -37,7 +36,7 @@ // fallback procFile = filepath.Join(rootPath, fmt.Sprintf("proc/%v/attr/current", pid)) } - contents, err := ioutil.ReadFile(procFile) + contents, err := os.ReadFile(procFile) if os.IsNotExist(err) { return "unconfined", nil } else if err != nil { diff -Nru snapd-2.62+23.10/sandbox/apparmor/profile.go snapd-2.63+23.10/sandbox/apparmor/profile.go --- snapd-2.62+23.10/sandbox/apparmor/profile.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/sandbox/apparmor/profile.go 2024-04-24 00:00:39.000000000 +0000 @@ -57,15 +57,15 @@ var ( runtimeNumCPU = runtime.NumCPU - osutilIsHomeUsingNFS = osutil.IsHomeUsingNFS + osutilIsHomeUsingRemoteFS = osutil.IsHomeUsingRemoteFS osutilIsRootWritableOverlay = osutil.IsRootWritableOverlay ) -// NfsSnippet contains extra permissions necessary for snaps and snap-confine -// to operate when NFS is used. This is an imperfect solution as this grants -// some network access to all the snaps on the system. +// RemoteFSSnippet contains extra permissions necessary for snaps and snap-confine +// to operate when remote file system, like nfs, is used. This is an imperfect +// solution as this grants some network access to all the snaps on the system. // For tracking see https://bugs.launchpad.net/apparmor/+bug/1724903 -var NfsSnippet = ` +var RemoteFSSnippet = ` # snapd autogenerated workaround for systems using NFS, for details see: # https://bugs.launchpad.net/ubuntu/+source/snapd/+bug/1662552 network inet, @@ -300,17 +300,19 @@ policy := make(map[string]osutil.FileState) - // Check if NFS is mounted at or under $HOME. Because NFS is not - // transparent to apparmor we must alter our profile to counter that and - // allow snap-confine to work. - if nfs, err := osutilIsHomeUsingNFS(); err != nil { - logger.Noticef("cannot determine if NFS is in use: %v", err) + // Check if a remote file system is mounted at or under $HOME. + // Because some remote file systems, like NFS, are not transparent + // to apparmor we must alter our profile to counter that and allow + // snap-confine to work. + if nfs, err := osutilIsHomeUsingRemoteFS(); err != nil { + logger.Noticef("cannot determine if remote file system is in use: %v", err) } else if nfs { + // TODO: rename this to remote-fs-support. policy["nfs-support"] = &osutil.MemoryFileState{ - Content: []byte(NfsSnippet), + Content: []byte(RemoteFSSnippet), Mode: 0644, } - logger.Noticef("snapd enabled NFS support, additional implicit network permissions granted") + logger.Noticef("snapd enabled remote file system support, additional implicit network permissions granted") } // Check if '/' is on overlayfs. 
If so, add the necessary rules for diff -Nru snapd-2.62+23.10/sandbox/apparmor/profile_test.go snapd-2.63+23.10/sandbox/apparmor/profile_test.go --- snapd-2.62+23.10/sandbox/apparmor/profile_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/sandbox/apparmor/profile_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -391,21 +390,21 @@ dirs.SetRootDir(c.MkDir()) defer dirs.SetRootDir("") - restore := osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore := osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() restore = osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) defer restore() restore = apparmor.MockLoadHomedirs(func() ([]string, error) { return nil, nil }) defer restore() - // No features, no NFS, no Overlay, no homedirs + // No features, no remote file system, no Overlay, no homedirs wasChanged, err := apparmor.SetupSnapConfineSnippets() c.Check(err, IsNil) c.Check(wasChanged, Equals, false) - // Because overlay/nfs is not used there are no local policy files but the - // directory was created. - files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir) + // Because overlay/remote file system is not used there are no local + // policy files but the directory was created. + files, err := os.ReadDir(apparmor.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 0) } @@ -424,7 +423,7 @@ restore := osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() // Setup the system-params which is read by loadHomedirs in SetupSnapConfineSnippets @@ -437,11 +436,13 @@ // Homedirs was specified, so we expect an entry for each homedir in a // snippet 'homedirs' - files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir) + files, err := os.ReadDir(apparmor.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 1) c.Assert(files[0].Name(), Equals, "homedirs") - c.Assert(files[0].Mode(), Equals, os.FileMode(0644)) + fi, err := files[0].Info() + c.Assert(err, IsNil) + c.Assert(fi.Mode().Perm(), Equals, os.FileMode(0644)) c.Assert(files[0].IsDir(), Equals, false) c.Assert(filepath.Join(apparmor.SnapConfineAppArmorDir, files[0].Name()), @@ -458,7 +459,7 @@ defer restore() restore = osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() restore = apparmor.MockLoadHomedirs(func() ([]string, error) { return nil, fmt.Errorf("failed to load") }) defer restore() @@ -469,7 +470,7 @@ // Probing apparmor_parser capabilities failed, so nothing gets written // to the snap-confine policy directory - files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir) + files, err := os.ReadDir(apparmor.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 0) @@ -483,7 +484,7 @@ restore := osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer 
restore() restore = apparmor.MockLoadHomedirs(func() ([]string, error) { return nil, nil }) defer restore() @@ -498,11 +499,13 @@ // Capability bpf is supported by the parser, so an extra policy file // for snap-confine is present - files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir) + files, err := os.ReadDir(apparmor.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 1) c.Assert(files[0].Name(), Equals, "cap-bpf") - c.Assert(files[0].Mode(), Equals, os.FileMode(0644)) + fi, err := files[0].Info() + c.Assert(err, IsNil) + c.Assert(fi.Mode().Perm(), Equals, os.FileMode(0644)) c.Assert(files[0].IsDir(), Equals, false) c.Assert(filepath.Join(apparmor.SnapConfineAppArmorDir, files[0].Name()), @@ -517,7 +520,7 @@ defer restore() restore = osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() restore = apparmor.MockLoadHomedirs(func() ([]string, error) { return nil, nil }) defer restore() @@ -532,7 +535,7 @@ // Probing apparmor_parser capabilities failed, so nothing gets written // to the snap-confine policy directory - files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir) + files, err := os.ReadDir(apparmor.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 0) @@ -547,7 +550,7 @@ // Make it appear as if overlay workaround was needed. restore := osutil.MockIsRootWritableOverlay(func() (string, error) { return "/upper", nil }) defer restore() - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() restore = apparmor.MockLoadHomedirs(func() ([]string, error) { return nil, nil }) defer restore() @@ -557,25 +560,27 @@ c.Check(wasChanged, Equals, true) // Because overlay is being used, we have the extra policy file. - files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir) + files, err := os.ReadDir(apparmor.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 1) c.Assert(files[0].Name(), Equals, "overlay-root") - c.Assert(files[0].Mode(), Equals, os.FileMode(0644)) + fi, err := files[0].Info() + c.Assert(err, IsNil) + c.Assert(fi.Mode().Perm(), Equals, os.FileMode(0644)) c.Assert(files[0].IsDir(), Equals, false) // The policy allows upperdir access. - data, err := ioutil.ReadFile(filepath.Join(apparmor.SnapConfineAppArmorDir, files[0].Name())) + data, err := os.ReadFile(filepath.Join(apparmor.SnapConfineAppArmorDir, files[0].Name())) c.Assert(err, IsNil) c.Assert(string(data), testutil.Contains, "\"/upper/{,**/}\" r,") } -func (s *appArmorSuite) TestSetupSnapConfineSnippetsNFS(c *C) { +func (s *appArmorSuite) TestSetupSnapConfineSnippetsRemoteFS(c *C) { dirs.SetRootDir(c.MkDir()) defer dirs.SetRootDir("") - // Make it appear as if NFS workaround was needed. - restore := osutil.MockIsHomeUsingNFS(func() (bool, error) { return true, nil }) + // Make it appear as if remote file system workaround was needed. + restore := osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return true, nil }) defer restore() restore = osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) defer restore() @@ -586,12 +591,14 @@ c.Check(err, IsNil) c.Check(wasChanged, Equals, true) - // Because NFS is being used, we have the extra policy file. 
- files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir) + // Because remote file system is being used, we have the extra policy file. + files, err := os.ReadDir(apparmor.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 1) c.Assert(files[0].Name(), Equals, "nfs-support") - c.Assert(files[0].Mode(), Equals, os.FileMode(0644)) + fi, err := files[0].Info() + c.Assert(err, IsNil) + c.Assert(fi.Mode().Perm(), Equals, os.FileMode(0644)) c.Assert(files[0].IsDir(), Equals, false) // The policy allows network access. @@ -600,7 +607,7 @@ c.Assert(fn, testutil.FileContains, "network inet6,") } -// Test behavior when isHomeUsingNFS fails. +// Test behavior when isHomeUsingRemoteFS fails. func (s *appArmorSuite) TestSetupSnapConfineGeneratedPolicyError1(c *C) { dirs.SetRootDir(c.MkDir()) defer dirs.SetRootDir("") @@ -608,8 +615,8 @@ log, restore := logger.MockLogger() defer restore() - // Make it appear as if NFS detection was broken. - restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, fmt.Errorf("broken") }) + // Make it appear as if remote file system detection was broken. + restore = osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, fmt.Errorf("broken") }) defer restore() // Make it appear as if overlay was not used. @@ -621,20 +628,20 @@ defer restore() wasChanged, err := apparmor.SetupSnapConfineSnippets() - // NOTE: Errors in determining NFS are non-fatal to prevent snapd from - // failing to operate. A warning message is logged but system operates as - // if NFS was not active. + // NOTE: Errors in determining remote file system are non-fatal to prevent + // snapd from failing to operate. A warning message is logged but system + // operates as if remote file system was not active. c.Check(err, IsNil) c.Check(wasChanged, Equals, false) // While other stuff failed we created the policy directory and didn't // write any files to it. - files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir) + files, err := os.ReadDir(apparmor.SnapConfineAppArmorDir) c.Check(err, IsNil) c.Check(files, HasLen, 0) // But an error was logged - c.Check(log.String(), testutil.Contains, "cannot determine if NFS is in use: broken") + c.Check(log.String(), testutil.Contains, "cannot determine if remote file system is in use: broken") } // Test behavior when MkdirAll fails @@ -661,8 +668,8 @@ dirs.SetRootDir(c.MkDir()) defer dirs.SetRootDir("") - // Make it appear as if NFS workaround was not needed. - restore := osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + // Make it appear as if remote file system workaround was not needed. + restore := osutil.MockIsHomeUsingRemoteFS(func() (bool, error) { return false, nil }) defer restore() // Make it appear as if overlay was not used. @@ -692,7 +699,7 @@ c.Check(wasChanged, Equals, false) // The policy directory was unchanged. 
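Unlike ReadFile, ioutil.ReadDir and os.ReadDir are not exact equivalents: os.ReadDir returns []fs.DirEntry, which carries only the name and type bits, which is why the test changes above call files[0].Info() to recover a FileInfo before asserting on the permission mode. A small sketch of that pattern, assuming an arbitrary placeholder directory:

    package main

    import (
    	"fmt"
    	"os"
    )

    func main() {
    	entries, err := os.ReadDir("/etc/apparmor.d") // placeholder path
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		return
    	}
    	for _, entry := range entries {
    		// fs.DirEntry has no Mode(); fetch the FileInfo explicitly.
    		fi, err := entry.Info()
    		if err != nil {
    			fmt.Fprintln(os.Stderr, err)
    			return
    		}
    		fmt.Printf("%s %v\n", entry.Name(), fi.Mode().Perm())
    	}
    }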
- files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir) + files, err := os.ReadDir(apparmor.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 1) } @@ -711,7 +718,7 @@ c.Check(err, IsNil) // The files were removed - files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir) + files, err := os.ReadDir(apparmor.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 0) } @@ -728,7 +735,7 @@ c.Check(err, IsNil) // Nothing happens - files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir) + files, err := os.ReadDir(apparmor.SnapConfineAppArmorDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 0) } diff -Nru snapd-2.62+23.10/sandbox/cgroup/freezer.go snapd-2.63+23.10/sandbox/cgroup/freezer.go --- snapd-2.62+23.10/sandbox/cgroup/freezer.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/sandbox/cgroup/freezer.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "bytes" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -94,7 +93,7 @@ return fmt.Errorf("cannot freeze processes of snap %q, %v", snapName, err) } for i := 0; i < 30; i++ { - data, err := ioutil.ReadFile(fname) + data, err := os.ReadFile(fname) if err != nil { return fmt.Errorf("cannot determine the freeze state of processes of snap %q, %v", snapName, err) } @@ -206,7 +205,7 @@ return fmt.Errorf("cannot freeze processes of snap %q, %v", snapName, err) } for i := 0; i < 30; i++ { - data, err := ioutil.ReadFile(fname) + data, err := os.ReadFile(fname) if err != nil { if os.IsNotExist(err) { // group may be gone diff -Nru snapd-2.62+23.10/sandbox/seccomp/export_test.go snapd-2.63+23.10/sandbox/seccomp/export_test.go --- snapd-2.62+23.10/sandbox/seccomp/export_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/sandbox/seccomp/export_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,10 +20,10 @@ package seccomp func MockIoutilReadfile(newReadfile func(string) ([]byte, error)) (restorer func()) { - old := ioutilReadFile - ioutilReadFile = newReadfile + old := osReadFile + osReadFile = newReadfile return func() { - ioutilReadFile = old + osReadFile = old } } diff -Nru snapd-2.62+23.10/sandbox/seccomp/seccomp.go snapd-2.63+23.10/sandbox/seccomp/seccomp.go --- snapd-2.62+23.10/sandbox/seccomp/seccomp.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/sandbox/seccomp/seccomp.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,7 @@ package seccomp import ( - "io/ioutil" + "os" "sort" "strings" "sync" @@ -58,10 +58,10 @@ return scp.probedActions } -var ioutilReadFile = ioutil.ReadFile +var osReadFile = os.ReadFile func probeActions() []string { - contents, err := ioutilReadFile("/proc/sys/kernel/seccomp/actions_avail") + contents, err := osReadFile("/proc/sys/kernel/seccomp/actions_avail") if err != nil { return []string{} } diff -Nru snapd-2.62+23.10/sandbox/selinux/selinux_linux.go snapd-2.63+23.10/sandbox/selinux/selinux_linux.go --- snapd-2.62+23.10/sandbox/selinux/selinux_linux.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/sandbox/selinux/selinux_linux.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "bytes" "fmt" - "io/ioutil" + "os" "path/filepath" "github.com/snapcore/snapd/osutil" @@ -48,7 +48,7 @@ return false, nil } - rawState, err := ioutil.ReadFile(filepath.Join(mnt, "enforce")) + rawState, err := os.ReadFile(filepath.Join(mnt, "enforce")) if err != nil { return false, err } diff -Nru snapd-2.62+23.10/secboot/secboot_hooks.go snapd-2.63+23.10/secboot/secboot_hooks.go --- 
snapd-2.62+23.10/secboot/secboot_hooks.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/secboot/secboot_hooks.go 2024-04-24 00:00:39.000000000 +0000 @@ -26,7 +26,6 @@ "encoding/json" "fmt" "io" - "io/ioutil" "os" sb "github.com/snapcore/secboot" @@ -137,7 +136,7 @@ func unlockVolumeUsingSealedKeyFDERevealKeyV1(sealedEncryptionKeyFile, sourceDevice, targetDevice, mapperName string) (UnlockResult, error) { res := UnlockResult{IsEncrypted: true, PartDevice: sourceDevice} - sealedKey, err := ioutil.ReadFile(sealedEncryptionKeyFile) + sealedKey, err := os.ReadFile(sealedEncryptionKeyFile) if err != nil { return res, fmt.Errorf("cannot read sealed key file: %v", err) } diff -Nru snapd-2.62+23.10/secboot/secboot_tpm.go snapd-2.63+23.10/secboot/secboot_tpm.go --- snapd-2.62+23.10/secboot/secboot_tpm.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/secboot/secboot_tpm.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "crypto/rand" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -441,7 +440,7 @@ return err } - authKey, err := ioutil.ReadFile(params.TPMPolicyAuthKeyFile) + authKey, err := os.ReadFile(params.TPMPolicyAuthKeyFile) if err != nil { return fmt.Errorf("cannot read the policy auth key file: %v", err) } @@ -549,7 +548,7 @@ var currentLockoutAuth []byte if mode == TPMPartialReprovision { logger.Debugf("using existing lockout authorization") - d, err := ioutil.ReadFile(lockoutAuthFile) + d, err := os.ReadFile(lockoutAuthFile) if err != nil { return fmt.Errorf("cannot read existing lockout auth: %v", err) } @@ -703,7 +702,7 @@ } defer tpm.Close() - lockoutAuth, err := ioutil.ReadFile(lockoutAuthFile) + lockoutAuth, err := os.ReadFile(lockoutAuthFile) if err != nil { return fmt.Errorf("cannot read existing lockout auth: %v", err) } diff -Nru snapd-2.62+23.10/seed/helpers.go snapd-2.63+23.10/seed/helpers.go --- snapd-2.62+23.10/seed/helpers.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/seed/helpers.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "fmt" - "io/ioutil" "os" "path/filepath" @@ -61,7 +60,7 @@ func loadAssertions(assertsDir string, loadedFunc func(*asserts.Ref) error) (*asserts.Batch, error) { logger.Debugf("loading assertions from %s", assertsDir) - dc, err := ioutil.ReadDir(assertsDir) + dc, err := os.ReadDir(assertsDir) if err != nil { if os.IsNotExist(err) { return nil, ErrNoAssertions diff -Nru snapd-2.62+23.10/seed/internal/options20.go snapd-2.63+23.10/seed/internal/options20.go --- snapd-2.62+23.10/seed/internal/options20.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/seed/internal/options20.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "os" "strings" "gopkg.in/yaml.v2" @@ -63,7 +63,7 @@ func ReadOptions20(optionsFn string) (*Options20, error) { errPrefix := "cannot read grade dangerous options yaml" - yamlData, err := ioutil.ReadFile(optionsFn) + yamlData, err := os.ReadFile(optionsFn) if err != nil { return nil, fmt.Errorf("%s: %v", errPrefix, err) } diff -Nru snapd-2.62+23.10/seed/internal/seed_yaml.go snapd-2.63+23.10/seed/internal/seed_yaml.go --- snapd-2.62+23.10/seed/internal/seed_yaml.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/seed/internal/seed_yaml.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "os" "strings" "gopkg.in/yaml.v2" @@ -62,7 +62,7 @@ func ReadSeedYaml(fn string) (*Seed16, error) { errPrefix := "cannot read seed yaml" - yamlData, err := ioutil.ReadFile(fn) + yamlData, err 
:= os.ReadFile(fn) if err != nil { return nil, fmt.Errorf("%s: %v", errPrefix, err) } diff -Nru snapd-2.62+23.10/seed/seedwriter/manifest_test.go snapd-2.63+23.10/seed/seedwriter/manifest_test.go --- snapd-2.62+23.10/seed/seedwriter/manifest_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/seed/seedwriter/manifest_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,6 @@ package seedwriter_test import ( - "io/ioutil" "os" "path/filepath" "time" @@ -149,7 +148,7 @@ }, }, ) - contents, err := ioutil.ReadFile(filePath) + contents, err := os.ReadFile(filePath) c.Assert(err, IsNil) c.Check(string(contents), Equals, `canonical/base-set=4 canonical/opt-set 1 @@ -324,7 +323,7 @@ manifest.Write(manifestFile) // Read it back in and verify contents - data, err := ioutil.ReadFile(manifestFile) + data, err := os.ReadFile(manifestFile) c.Assert(err, IsNil) c.Assert(string(data), Equals, "canonical/base-set=1\n") diff -Nru snapd-2.62+23.10/seed/seedwriter/writer_test.go snapd-2.63+23.10/seed/seedwriter/writer_test.go --- snapd-2.62+23.10/seed/seedwriter/writer_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/seed/seedwriter/writer_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "encoding/json" "errors" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -890,7 +889,7 @@ }) } - l, err := ioutil.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) + l, err := os.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) c.Assert(err, IsNil) c.Check(l, HasLen, 3) @@ -1009,7 +1008,7 @@ }) } - l, err := ioutil.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) + l, err := os.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) c.Assert(err, IsNil) c.Check(l, HasLen, 6) @@ -1247,7 +1246,7 @@ } c.Check(assertedNum, Equals, 2) - l, err := ioutil.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) + l, err := os.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) c.Assert(err, IsNil) c.Check(l, HasLen, 6) @@ -2037,7 +2036,7 @@ }) } - l, err := ioutil.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) + l, err := os.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) c.Assert(err, IsNil) c.Check(l, HasLen, 8) @@ -2194,7 +2193,7 @@ }) } - l, err := ioutil.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) + l, err := os.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) c.Assert(err, IsNil) c.Check(l, HasLen, 8) @@ -2311,7 +2310,7 @@ c.Check(p, testutil.FilePresent) } - l, err := ioutil.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) + l, err := os.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) c.Assert(err, IsNil) c.Check(l, HasLen, 7) @@ -2381,11 +2380,11 @@ c.Check(filepath.Join(systemDir, "extra-snaps"), testutil.FileAbsent) // check auxiliary store info - l, err = ioutil.ReadDir(filepath.Join(systemDir, "snaps")) + l, err = os.ReadDir(filepath.Join(systemDir, "snaps")) c.Assert(err, IsNil) c.Check(l, HasLen, 1) - b, err := ioutil.ReadFile(filepath.Join(systemDir, "snaps", "aux-info.json")) + b, err := os.ReadFile(filepath.Join(systemDir, "snaps", "aux-info.json")) c.Assert(err, IsNil) var auxInfos map[string]map[string]interface{} err = json.Unmarshal(b, &auxInfos) @@ -3030,7 +3029,7 @@ systemDir := filepath.Join(s.opts.SeedDir, "systems", s.opts.Label) c.Check(systemDir, testutil.FilePresent) - l, err := ioutil.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) + l, err := os.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) c.Assert(err, IsNil) c.Check(l, HasLen, 4) @@ -3128,7 +3127,7 @@ systemDir := filepath.Join(s.opts.SeedDir, "systems", s.opts.Label) c.Check(systemDir, testutil.FilePresent) - l, err := 
ioutil.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) + l, err := os.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) c.Assert(err, IsNil) c.Check(l, HasLen, 5) @@ -3247,7 +3246,7 @@ c.Check(p, testutil.FilePresent) } - l, err := ioutil.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) + l, err := os.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) c.Assert(err, IsNil) c.Check(l, HasLen, 4) @@ -3433,7 +3432,7 @@ systemDir := filepath.Join(s.opts.SeedDir, "systems", s.opts.Label) c.Check(systemDir, testutil.FilePresent) - l, err := ioutil.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) + l, err := os.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) c.Assert(err, IsNil) c.Check(l, HasLen, 4) @@ -3589,7 +3588,7 @@ systemDir := filepath.Join(s.opts.SeedDir, "systems", s.opts.Label) c.Check(systemDir, testutil.FilePresent) - l, err := ioutil.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) + l, err := os.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) c.Assert(err, IsNil) c.Check(l, HasLen, 4) @@ -3705,7 +3704,7 @@ systemDir := filepath.Join(s.opts.SeedDir, "systems", s.opts.Label) c.Check(systemDir, testutil.FilePresent) - l, err := ioutil.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) + l, err := os.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) c.Assert(err, IsNil) c.Check(l, HasLen, 4) @@ -4101,7 +4100,7 @@ c.Check(systemDir, testutil.FilePresent) // check snaps - l, err := ioutil.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) + l, err := os.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) c.Assert(err, IsNil) c.Check(l, HasLen, 4) @@ -4211,7 +4210,7 @@ c.Assert(seedYaml.Snaps, HasLen, 4) // check snaps - l, err := ioutil.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) + l, err := os.ReadDir(filepath.Join(s.opts.SeedDir, "snaps")) c.Assert(err, IsNil) c.Check(l, HasLen, 4) @@ -4346,7 +4345,7 @@ err = w.WriteMeta() c.Assert(err, IsNil) - b, err := ioutil.ReadFile(path.Join(s.opts.SeedDir, "seed.manifest")) + b, err := os.ReadFile(path.Join(s.opts.SeedDir, "seed.manifest")) c.Assert(err, IsNil) c.Check(string(b), Equals, `core20 1 pc 1 @@ -4637,7 +4636,7 @@ // the manifest is tracking validation-sets, then we should not // see pc/pc-kernel in the manifest, instead it should just show // the validation-set tracking those. - m, err := ioutil.ReadFile(s.opts.ManifestPath) + m, err := os.ReadFile(s.opts.ManifestPath) c.Assert(err, IsNil) c.Check(string(m), Equals, `canonical/base-set 1 core20 1 diff -Nru snapd-2.62+23.10/snap/component.go snapd-2.63+23.10/snap/component.go --- snapd-2.62+23.10/snap/component.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/snap/component.go 2024-04-24 00:00:39.000000000 +0000 @@ -70,6 +70,12 @@ return *csi == *other } +// ComponentBaseDir returns where components are to be found for the +// snap with name instanceName. +func ComponentsBaseDir(instanceName string) string { + return filepath.Join(BaseDir(instanceName), "components") +} + // componentPlaceInfo holds information about where to put a component in the // system. It implements ContainerPlaceInfo and should be used only via this // interface. @@ -77,22 +83,20 @@ // Name and revision for the component compName string compRevision Revision - // snapInstance and snapRevision identify the snap that uses this component. + // snapInstance identifies the snap that uses this component. 
snapInstance string - snapRevision Revision } var _ ContainerPlaceInfo = (*componentPlaceInfo)(nil) // MinimalComponentContainerPlaceInfo returns a ContainerPlaceInfo with just // the location information for a component of the given name and revision that -// is used by a snapInstance with revision snapRev. -func MinimalComponentContainerPlaceInfo(compName string, compRev Revision, snapInstance string, snapRev Revision) ContainerPlaceInfo { +// is used by a snapInstance. +func MinimalComponentContainerPlaceInfo(compName string, compRev Revision, snapInstance string) ContainerPlaceInfo { return &componentPlaceInfo{ compName: compName, compRevision: compRev, snapInstance: snapInstance, - snapRevision: snapRev, } } @@ -108,10 +112,10 @@ // MountDir returns the directory where a component gets mounted, which // will be of the form: -// /snaps//components// +// /snaps//components/mnt// func (c *componentPlaceInfo) MountDir() string { - return filepath.Join(BaseDir(c.snapInstance), "components", - c.snapRevision.String(), c.compName) + return filepath.Join(ComponentsBaseDir(c.snapInstance), "mnt", + c.compName, c.compRevision.String()) } // MountFile returns the path of the file to be mounted for a component, diff -Nru snapd-2.62+23.10/snap/component_test.go snapd-2.63+23.10/snap/component_test.go --- snapd-2.62+23.10/snap/component_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/snap/component_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -277,14 +277,14 @@ } func (s *componentSuite) TestComponentContainerPlaceInfoImpl(c *C) { - cpi := snap.MinimalComponentContainerPlaceInfo("test-info", snap.R(25), "mysnap_instance", snap.R(11)) + cpi := snap.MinimalComponentContainerPlaceInfo("test-info", snap.R(25), "mysnap_instance") var contPi snap.ContainerPlaceInfo = cpi c.Check(contPi.ContainerName(), Equals, "mysnap_instance+test-info") c.Check(contPi.Filename(), Equals, "mysnap_instance+test-info_25.comp") c.Check(contPi.MountDir(), Equals, - filepath.Join(dirs.SnapMountDir, "mysnap_instance/components/11/test-info")) + filepath.Join(dirs.SnapMountDir, "mysnap_instance/components/mnt/test-info/25")) c.Check(contPi.MountFile(), Equals, filepath.Join(dirs.GlobalRootDir, "var/lib/snapd/snaps/mysnap_instance+test-info_25.comp")) c.Check(contPi.MountDescription(), Equals, "Mount unit for mysnap_instance+test-info, revision 25") diff -Nru snapd-2.62+23.10/snap/container.go snapd-2.63+23.10/snap/container.go --- snapd-2.62+23.10/snap/container.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/snap/container.go 2024-04-24 00:00:39.000000000 +0000 @@ -104,39 +104,38 @@ // continue further either due to absolute symlinks or symlinks // that escape the container. // -// max depth reached?<------ -// /\ \ -// yes no \ -// / \ \ -// V V \ -// error path \ -// │ \ -// V \ -// read target \ -// │ \ -// V \ -// is absolute? \ -// /\ \ -// yes no \ -// / \ \ -// V V \ -// isExternal eval relative target \ -// + \ \ -// return target V \ -// escapes container? \ -// /\ \ -// yes no \ -// / \ | -// V V | -// isExternal is symlink? | -// + /\ | -// return target yes no │ -// / \ │ -// V V │ -// !isExternal path = target │ -// + \----------│ -// return target -// +// max depth reached?<------ +// /\ \ +// yes no \ +// / \ \ +// V V \ +// error path \ +// │ \ +// V \ +// read target \ +// │ \ +// V \ +// is absolute? \ +// /\ \ +// yes no \ +// / \ \ +// V V \ +// isExternal eval relative target \ +// + \ \ +// return target V \ +// escapes container? 
\ +// /\ \ +// yes no \ +// / \ | +// V V | +// isExternal is symlink? | +// + /\ | +// return target yes no │ +// / \ │ +// V V │ +// !isExternal path = target │ +// + \----------│ +// return target func evalSymlink(c Container, path string) (symlinkInfo, error) { var naiveTarget string diff -Nru snapd-2.62+23.10/snap/implicit.go snapd-2.63+23.10/snap/implicit.go --- snapd-2.62+23.10/snap/implicit.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/snap/implicit.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "os" "github.com/snapcore/snapd/osutil" ) @@ -37,7 +37,7 @@ return nil } - fileInfos, err := ioutil.ReadDir(hooksDir) + fileInfos, err := os.ReadDir(hooksDir) if err != nil { return fmt.Errorf("unable to read hooks directory: %s", err) } diff -Nru snapd-2.62+23.10/snap/info.go snapd-2.63+23.10/snap/info.go --- snapd-2.62+23.10/snap/info.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/snap/info.go 2024-04-24 00:00:39.000000000 +0000 @@ -1402,6 +1402,12 @@ } func (e NotFoundError) Error() string { + if e.Revision.Unset() { + if e.Path != "" { + return fmt.Sprintf("cannot find current revision for snap %s: missing file %s", e.Snap, e.Path) + } + return fmt.Sprintf("cannot find current revision for snap %s", e.Snap) + } if e.Path != "" { return fmt.Sprintf("cannot find installed snap %q at revision %s: missing file %s", e.Snap, e.Revision, e.Path) } @@ -1503,7 +1509,7 @@ curFn := filepath.Join(dirs.SnapMountDir, snapName, "current") realFn, err := os.Readlink(curFn) if err != nil { - return nil, fmt.Errorf("cannot find current revision for snap %s: %s", snapName, err) + return nil, fmt.Errorf("%w: %s", NotFoundError{Snap: snapName, Revision: R(0)}, err) } rev := filepath.Base(realFn) revision, err := ParseRevision(rev) diff -Nru snapd-2.62+23.10/snap/info_test.go snapd-2.63+23.10/snap/info_test.go --- snapd-2.62+23.10/snap/info_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/snap/info_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -377,6 +377,7 @@ snapInfo3, err := snap.ReadCurrentInfo("not-sample") c.Check(snapInfo3, IsNil) c.Assert(err, ErrorMatches, `cannot find current revision for snap not-sample:.*`) + c.Assert(errors.As(err, &snap.NotFoundError{}), Equals, true) } func (s *infoSuite) TestReadCurrentInfoWithInstance(c *C) { @@ -395,6 +396,7 @@ snapInfo3, err := snap.ReadCurrentInfo("sample_other") c.Check(snapInfo3, IsNil) c.Assert(err, ErrorMatches, `cannot find current revision for snap sample_other:.*`) + c.Assert(errors.As(err, &snap.NotFoundError{}), Equals, true) } func (s *infoSuite) TestInstallDate(c *C) { diff -Nru snapd-2.62+23.10/snap/pack/pack.go snapd-2.63+23.10/snap/pack/pack.go --- snapd-2.62+23.10/snap/pack/pack.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/snap/pack/pack.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "fmt" "io" - "io/ioutil" "os" "path/filepath" @@ -162,7 +161,7 @@ } func excludesFile() (filename string, err error) { - tmpf, err := ioutil.TempFile("", ".snap-pack-exclude-") + tmpf, err := os.CreateTemp("", ".snap-pack-exclude-") if err != nil { return "", err } diff -Nru snapd-2.62+23.10/snap/pack/pack_test.go snapd-2.63+23.10/snap/pack/pack_test.go --- snapd-2.62+23.10/snap/pack/pack_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/snap/pack/pack_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "encoding/json" "fmt" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -124,7 +123,7 @@ metaDir := 
filepath.Join(tempdir, "meta") err := os.Mkdir(metaDir, 0755) c.Assert(err, IsNil) - err = ioutil.WriteFile(filepath.Join(metaDir, "component.yaml"), []byte(componentYaml), 0644) + err = os.WriteFile(filepath.Join(metaDir, "component.yaml"), []byte(componentYaml), 0644) c.Assert(err, IsNil) return tempdir } diff -Nru snapd-2.62+23.10/snap/snapdir/snapdir.go snapd-2.63+23.10/snap/snapdir/snapdir.go --- snapd-2.62+23.10/snap/snapdir/snapdir.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/snap/snapdir/snapdir.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "fmt" "io" - "io/ioutil" "os" "path/filepath" @@ -85,7 +84,7 @@ } func (s *SnapDir) ReadFile(file string) (content []byte, err error) { - return ioutil.ReadFile(filepath.Join(s.path, file)) + return os.ReadFile(filepath.Join(s.path, file)) } func (s *SnapDir) ReadLink(file string) (string, error) { @@ -196,7 +195,7 @@ } func (s *SnapDir) ListDir(path string) ([]string, error) { - fileInfos, err := ioutil.ReadDir(filepath.Join(s.path, path)) + fileInfos, err := os.ReadDir(filepath.Join(s.path, path)) if err != nil { return nil, err } diff -Nru snapd-2.62+23.10/snap/squashfs/squashfs.go snapd-2.63+23.10/snap/squashfs/squashfs.go --- snapd-2.62+23.10/snap/squashfs/squashfs.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/snap/squashfs/squashfs.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "bytes" "fmt" "io" - "io/ioutil" "os" "os/exec" "path" @@ -228,7 +227,7 @@ } func (s *Snap) withUnpackedFile(filePath string, f func(p string) error) error { - tmpdir, err := ioutil.TempDir("", "read-file") + tmpdir, err := os.MkdirTemp("", "read-file") if err != nil { return err } @@ -264,7 +263,7 @@ // ReadFile returns the content of a single file inside a squashfs snap. 
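The pack and squashfs hunks above replace ioutil.TempDir and ioutil.TempFile with os.MkdirTemp and os.CreateTemp, which keep the same (dir, pattern) arguments. A minimal sketch of the combined pattern (the prefixes are reused from the hunks, everything else is hypothetical):

    package main

    import (
    	"fmt"
    	"os"
    )

    func main() {
    	// os.MkdirTemp replaces ioutil.TempDir with the same arguments.
    	tmpdir, err := os.MkdirTemp("", "read-file")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		return
    	}
    	defer os.RemoveAll(tmpdir)

    	// os.CreateTemp replaces ioutil.TempFile in the same way.
    	f, err := os.CreateTemp(tmpdir, ".snap-pack-exclude-")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		return
    	}
    	defer f.Close()

    	fmt.Println(tmpdir, f.Name())
    }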
func (s *Snap) ReadFile(filePath string) (content []byte, err error) { err = s.withUnpackedFile(filePath, func(p string) (err error) { - content, err = ioutil.ReadFile(p) + content, err = os.ReadFile(p) return }) if err != nil { diff -Nru snapd-2.62+23.10/snap/squashfs/squashfs_test.go snapd-2.63+23.10/snap/squashfs/squashfs_test.go --- snapd-2.62+23.10/snap/squashfs/squashfs_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/snap/squashfs/squashfs_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,7 @@ "bytes" "errors" "fmt" - "io/ioutil" + "io" "math" "os" "os/exec" @@ -122,7 +122,7 @@ restore := osutil.MockMountInfo("") s.AddCleanup(restore) - s.outf, err = ioutil.TempFile(c.MkDir(), "") + s.outf, err = os.CreateTemp(c.MkDir(), "") c.Assert(err, IsNil) s.oldStdout, s.oldStderr = os.Stdout, os.Stderr os.Stdout, os.Stderr = s.outf, s.outf @@ -134,7 +134,7 @@ // this ensures things were quiet _, err := s.outf.Seek(0, 0) c.Assert(err, IsNil) - outbuf, err := ioutil.ReadAll(s.outf) + outbuf, err := io.ReadAll(s.outf) c.Assert(err, IsNil) c.Check(string(outbuf), Equals, "") } diff -Nru snapd-2.62+23.10/snap/sysparams/sysparams.go snapd-2.63+23.10/snap/sysparams/sysparams.go --- snapd-2.62+23.10/snap/sysparams/sysparams.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/snap/sysparams/sysparams.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "bufio" "fmt" - "io/ioutil" + "os" "strings" "github.com/snapcore/snapd/dirs" @@ -98,7 +98,7 @@ return &SystemParams{rootdir: rootdir}, nil } - data, err := ioutil.ReadFile(sspFile) + data, err := os.ReadFile(sspFile) if err != nil { return nil, fmt.Errorf("cannot read system-params: %v", err) } diff -Nru snapd-2.62+23.10/snapdtool/cmdutil.go snapd-2.63+23.10/snapdtool/cmdutil.go --- snapd-2.62+23.10/snapdtool/cmdutil.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/snapdtool/cmdutil.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,7 @@ "bytes" "debug/elf" "fmt" - "io/ioutil" + "io" "os" "os/exec" "path/filepath" @@ -44,7 +44,7 @@ for _, prog := range el.Progs { if prog.Type == elf.PT_INTERP { r := prog.Open() - interp, err := ioutil.ReadAll(r) + interp, err := io.ReadAll(r) if err != nil { return "", nil } diff -Nru snapd-2.62+23.10/snapdtool/tool_linux_test.go snapd-2.63+23.10/snapdtool/tool_linux_test.go --- snapd-2.62+23.10/snapdtool/tool_linux_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/snapdtool/tool_linux_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,6 @@ package snapdtool_test import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -89,7 +88,7 @@ ` func benchmarkCSRE(b *testing.B, data string) { - tempdir, err := ioutil.TempDir("", "") + tempdir, err := os.MkdirTemp("", "") if err != nil { b.Fatalf("tempdir: %v", err) } diff -Nru snapd-2.62+23.10/snapdtool/tool_test.go snapd-2.63+23.10/snapdtool/tool_test.go --- snapd-2.62+23.10/snapdtool/tool_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/snapdtool/tool_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -354,7 +354,7 @@ // the internal one is not executable p, err := snapdtool.InternalToolPath("snapd") c.Assert(err, IsNil) - c.Check(p, Equals, filepath.Join(dirs.GlobalRootDir, "/usr/lib/snapd/snapd")) + c.Check(p, Equals, filepath.Join(dirs.DistroLibExecDir, "snapd")) } func (s *toolSuite) TestInternalToolPathWithLibexecdirLocation(c *C) { diff -Nru snapd-2.62+23.10/spread.yaml snapd-2.63+23.10/spread.yaml --- snapd-2.62+23.10/spread.yaml 2024-03-21 20:06:09.000000000 +0000 +++ 
snapd-2.63+23.10/spread.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -231,12 +231,14 @@ location: snapd-spread/us-east1-b halt-timeout: 2h systems: - - ubuntu-18.04-64: - workers: 6 - ubuntu-20.04-64: - workers: 6 + workers: 8 - ubuntu-22.04-64: - workers: 6 + workers: 8 + - ubuntu-23.10-64: + workers: 8 + - ubuntu-24.04-64: + workers: 8 google-nested: type: google @@ -372,6 +374,9 @@ - ubuntu-23.10-64: username: ubuntu password: ubuntu + - ubuntu-24.04-64: + username: ubuntu + password: ubuntu - debian-11-64: username: debian password: debian @@ -1064,7 +1069,7 @@ restore: | "$TESTSLIB"/prepare-restore.sh --restore-suite - tests/perf/: + tests/perf/main/: summary: Performance and Load tests backends: [external] environment: @@ -1286,7 +1291,7 @@ . "$TESTSLIB"/pkgdb.sh distro_purge_package qemu genisoimage sshpass qemu-kvm cloud-image-utils xz-utils - tests/nested/perf/: + tests/perf/nested/: summary: Performance and Load tests preparation suite backends: [google-nested, google-nested-dev, qemu-nested] environment: diff -Nru snapd-2.62+23.10/store/auth.go snapd-2.63+23.10/store/auth.go --- snapd-2.62+23.10/store/auth.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/store/auth.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "sync" @@ -269,7 +268,7 @@ if resp.StatusCode == 200 || resp.StatusCode == 202 { return json.NewDecoder(resp.Body).Decode(&responseData) } - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 1e6)) // do our best to read the body + body, _ := io.ReadAll(io.LimitReader(resp.Body, 1e6)) // do our best to read the body return fmt.Errorf("store server returned status %d and body %q", resp.StatusCode, body) }) if err != nil { diff -Nru snapd-2.62+23.10/store/auth_u1_test.go snapd-2.63+23.10/store/auth_u1_test.go --- snapd-2.62+23.10/store/auth_u1_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/store/auth_u1_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "io" - "io/ioutil" "net/http" "net/http/httptest" "time" @@ -240,7 +239,7 @@ func (s *authTestSuite) TestRefreshDischargeMacaroonError(c *C) { n := 0 mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) c.Assert(err, IsNil) c.Assert(data, NotNil) c.Assert(string(data), Equals, `{"discharge_macaroon":"soft-expired-serialized-discharge-macaroon"}`) @@ -374,7 +373,7 @@ func (s *authTestSuite) TestRequestDeviceSession(c *C) { mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) c.Check(string(jsonReq), Equals, `{"device-session-request":"session-request","model-assertion":"model-assertion","serial-assertion":"serial-assertion"}`) c.Check(r.Header.Get("X-Device-Authorization"), Equals, "") @@ -391,7 +390,7 @@ func (s *authTestSuite) TestRequestDeviceSessionWithPreviousSession(c *C) { mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) c.Check(string(jsonReq), Equals, `{"device-session-request":"session-request","model-assertion":"model-assertion","serial-assertion":"serial-assertion"}`) c.Check(r.Header.Get("X-Device-Authorization"), Equals, `Macaroon root="previous-session"`) diff -Nru 
snapd-2.62+23.10/store/cache.go snapd-2.63+23.10/store/cache.go --- snapd-2.62+23.10/store/cache.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/store/cache.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "sort" @@ -148,7 +147,7 @@ func (cm *CacheManager) count() int { // TODO: Use something more effective than a list of all entries // here. This will waste a lot of memory on large dirs. - if l, err := ioutil.ReadDir(cm.cacheDir); err == nil { + if l, err := os.ReadDir(cm.cacheDir); err == nil { return len(l) } return 0 @@ -161,10 +160,22 @@ // cleanup ensures that only maxItems are stored in the cache func (cm *CacheManager) cleanup() error { - fil, err := ioutil.ReadDir(cm.cacheDir) + entries, err := os.ReadDir(cm.cacheDir) if err != nil { return err } + + // we need the modtime so convert to FileInfo + fil := make([]os.FileInfo, 0, len(entries)) + for _, entry := range entries { + fi, err := entry.Info() + if err != nil { + return err + } + + fil = append(fil, fi) + } + if len(fil) <= cm.maxItems { return nil } diff -Nru snapd-2.62+23.10/store/store_action_fetch_assertions_test.go snapd-2.63+23.10/store/store_action_fetch_assertions_test.go --- snapd-2.62+23.10/store/store_action_fetch_assertions_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/store/store_action_fetch_assertions_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -81,7 +81,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -187,7 +187,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -318,7 +318,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -413,7 +413,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -584,7 +584,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -734,7 +734,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := 
ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -899,7 +899,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` diff -Nru snapd-2.62+23.10/store/store_action_test.go snapd-2.63+23.10/store/store_action_test.go --- snapd-2.62+23.10/store/store_action_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/store/store_action_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -95,7 +94,7 @@ c.Check(r.Header.Get("Snap-Device-Location"), Equals, "") c.Check(r.Header.Get("Snap-Classic"), Equals, "false") - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -206,7 +205,7 @@ c.Check(r.Header.Get("Snap-Device-Architecture"), Equals, arch.DpkgArchitecture()) c.Check(r.Header.Get("Snap-Classic"), Equals, "false") - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -305,7 +304,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -369,7 +368,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -422,7 +421,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -511,7 +510,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -672,7 +671,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -828,7 +827,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) 
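These store test hunks read HTTP request bodies with io.ReadAll, the direct replacement for ioutil.ReadAll; store/auth.go additionally wraps the body in io.LimitReader so an oversized error response cannot exhaust memory. A hedged sketch of that combination, with a strings.Reader standing in for resp.Body:

    package main

    import (
    	"fmt"
    	"io"
    	"strings"
    )

    func main() {
    	resp := strings.NewReader("store server error body") // stand-in for resp.Body
    	// Cap the read at 1e6 bytes, mirroring the "do our best to read the body" comment.
    	body, err := io.ReadAll(io.LimitReader(resp, 1e6))
    	if err != nil {
    		fmt.Println("read error:", err)
    		return
    	}
    	fmt.Printf("body %q\n", body)
    }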
c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -923,7 +922,7 @@ c.Check(r.Header.Get("Snap-Device-Location"), Equals, `cloud-name="gcp" region="us-west1" availability-zone="us-west1-b"`) c.Check(r.Header.Get("Snap-Classic"), Equals, "true") - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -1018,7 +1017,7 @@ c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) c.Check(r.Header.Get("Snap-Accept-Delta-Format"), Equals, "xdelta3") - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -1104,7 +1103,7 @@ c.Check(r.Header.Get("Snap-Refresh-Managed"), Equals, "true") - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -1222,7 +1221,7 @@ c.Check(r.Header.Get("Snap-Device-Architecture"), Equals, arch.DpkgArchitecture()) c.Check(r.Header.Get("Snap-Classic"), Equals, "false") - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -1335,7 +1334,7 @@ c.Check(r.Header.Get("Snap-Device-Architecture"), Equals, arch.DpkgArchitecture()) c.Check(r.Header.Get("Snap-Classic"), Equals, "false") - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -1505,7 +1504,7 @@ c.Check(r.Header.Get("Snap-Device-Architecture"), Equals, arch.DpkgArchitecture()) c.Check(r.Header.Get("Snap-Classic"), Equals, "false") - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -1582,7 +1581,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -1763,7 +1762,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -1887,7 +1886,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -2239,7 +2238,7 @@ io.WriteString(w, `{"nonce": "1234567890:9876543210"}`) case authSessionPath: // validity of request - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req map[string]string err = json.Unmarshal(jsonReq, &req) @@ -2301,7 +2300,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, 
`Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -2404,7 +2403,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -2515,7 +2514,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -2609,7 +2608,7 @@ // mock version that doesn't support `held` field (e.g. 52) w.Header().Set("Snap-Store-Version", "52") - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -2708,7 +2707,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -2800,7 +2799,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -2946,7 +2945,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -3060,7 +3059,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -3144,7 +3143,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` @@ -3228,7 +3227,7 @@ // check device authorization is set, implicitly checking doRequest was used c.Check(r.Header.Get("Snap-Device-Authorization"), Equals, `Macaroon root="device-macaroon"`) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req struct { Context []map[string]interface{} `json:"context"` diff -Nru snapd-2.62+23.10/store/store_download_test.go snapd-2.63+23.10/store/store_download_test.go --- snapd-2.62+23.10/store/store_download_test.go 2024-03-21 20:06:09.000000000 +0000 
+++ snapd-2.63+23.10/store/store_download_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -25,7 +25,6 @@ "crypto" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -599,7 +598,7 @@ }) defer restore() - w, err := ioutil.TempFile("", "") + w, err := os.CreateTemp("", "") c.Assert(err, IsNil) defer os.Remove(w.Name()) @@ -764,7 +763,7 @@ restore := store.MockDoDownloadReq(func(ctx context.Context, url *url.URL, cdnHeader string, resume int64, s *store.Store, user *auth.UserState) (*http.Response, error) { c.Check(url.String(), Equals, "URL") r := &http.Response{ - Body: ioutil.NopCloser(bytes.NewReader(expectedContent[resume:])), + Body: io.NopCloser(bytes.NewReader(expectedContent[resume:])), } if resume > 0 { r.StatusCode = 206 @@ -884,6 +883,10 @@ } func (s *storeDownloadSuite) TestTransferSpeedMonitoringWriterHappy(c *C) { + if os.Getenv("SNAPD_SKIP_SLOW_TESTS") != "" { + c.Skip("skipping slow test") + } + origCtx := context.TODO() w, ctx := store.NewTransferSpeedMonitoringWriterAndContext(origCtx, 50*time.Millisecond, 1) @@ -908,6 +911,10 @@ } func (s *storeDownloadSuite) TestTransferSpeedMonitoringWriterUnhappy(c *C) { + if os.Getenv("SNAPD_SKIP_SLOW_TESTS") != "" { + c.Skip("skipping slow test") + } + origCtx := context.TODO() w, ctx := store.NewTransferSpeedMonitoringWriterAndContext(origCtx, 50*time.Millisecond, 1000) diff -Nru snapd-2.62+23.10/store/store_test.go snapd-2.63+23.10/store/store_test.go --- snapd-2.62+23.10/store/store_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/store/store_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -26,7 +26,6 @@ "errors" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -475,7 +474,7 @@ defer response.Body.Close() c.Assert(err, IsNil) - responseData, err := ioutil.ReadAll(response.Body) + responseData, err := io.ReadAll(response.Body) c.Assert(err, IsNil) c.Check(string(responseData), Equals, "response-data") } @@ -511,7 +510,7 @@ defer response.Body.Close() c.Assert(err, IsNil) - responseData, err := ioutil.ReadAll(response.Body) + responseData, err := io.ReadAll(response.Body) c.Assert(err, IsNil) c.Check(string(responseData), Equals, "response-data") } @@ -544,7 +543,7 @@ defer response.Body.Close() c.Assert(err, IsNil) - responseData, err := ioutil.ReadAll(response.Body) + responseData, err := io.ReadAll(response.Body) c.Assert(err, IsNil) c.Check(string(responseData), Equals, "response-data") } @@ -589,7 +588,7 @@ defer response.Body.Close() c.Assert(err, IsNil) - responseData, err := ioutil.ReadAll(response.Body) + responseData, err := io.ReadAll(response.Body) c.Assert(err, IsNil) c.Check(string(responseData), Equals, "response-data") c.Check(refreshDischargeEndpointHit, Equals, true) @@ -641,7 +640,7 @@ io.WriteString(w, `{"nonce": "1234567890:9876543210"}`) case authSessionPath: // validity of request - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req map[string]string err = json.Unmarshal(jsonReq, &req) @@ -687,7 +686,7 @@ io.WriteString(w, `{"nonce": "1234567890:9876543210"}`) case authSessionPath: // validity of request - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req map[string]string err = json.Unmarshal(jsonReq, &req) @@ -775,7 +774,7 @@ io.WriteString(w, `{"nonce": "1234567890:9876543210"}`) case authSessionPath: // validity of request - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req map[string]string err 
= json.Unmarshal(jsonReq, &req) @@ -815,7 +814,7 @@ c.Assert(err, IsNil) defer response.Body.Close() - responseData, err := ioutil.ReadAll(response.Body) + responseData, err := io.ReadAll(response.Body) c.Assert(err, IsNil) c.Check(string(responseData), Equals, "response-data") c.Check(deviceSessionRequested, Equals, true) @@ -866,7 +865,7 @@ io.WriteString(w, `{"nonce": "1234567890:9876543210"}`) case authSessionPath: // validity of request - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) var req map[string]string err = json.Unmarshal(jsonReq, &req) @@ -907,7 +906,7 @@ c.Check(resp.StatusCode, Equals, 200) - responseData, err := ioutil.ReadAll(resp.Body) + responseData, err := io.ReadAll(resp.Body) c.Assert(err, IsNil) c.Check(string(responseData), Equals, "response-data") c.Check(refreshDischargeEndpointHit, Equals, true) @@ -941,7 +940,7 @@ defer response.Body.Close() c.Assert(err, IsNil) - responseData, err := ioutil.ReadAll(response.Body) + responseData, err := io.ReadAll(response.Body) c.Assert(err, IsNil) c.Check(string(responseData), Equals, "response-data") } @@ -4008,7 +4007,7 @@ c.Check(r.Header.Get("Accept"), Equals, store.JsonContentType) c.Check(r.Header.Get("Content-Type"), Equals, store.JsonContentType) c.Check(r.URL.Path, Equals, buyPath) - jsonReq, err := ioutil.ReadAll(r.Body) + jsonReq, err := io.ReadAll(r.Body) c.Assert(err, IsNil) c.Check(string(jsonReq), Equals, test.expectedInput) if test.buyErrorCode == "" { diff -Nru snapd-2.62+23.10/store/tooling/auth.go snapd-2.63+23.10/store/tooling/auth.go --- snapd-2.62+23.10/store/tooling/auth.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/store/tooling/auth.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,7 +24,6 @@ "encoding/base64" "encoding/json" "fmt" - "io/ioutil" "net/http" "os" @@ -61,7 +60,7 @@ } var err error - data, err = ioutil.ReadFile(authFn) + data, err = os.ReadFile(authFn) if err != nil { return nil, fmt.Errorf("cannot read auth file %q: %v", authFn, err) } diff -Nru snapd-2.62+23.10/syscheck/squashfs.go snapd-2.63+23.10/syscheck/squashfs.go --- snapd-2.62+23.10/syscheck/squashfs.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/syscheck/squashfs.go 2024-04-24 00:00:39.000000000 +0000 @@ -25,7 +25,6 @@ "encoding/base64" "fmt" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -82,13 +81,13 @@ return err } - tmpSquashfsFile, err := ioutil.TempFile("", "syscheck-squashfs-") + tmpSquashfsFile, err := os.CreateTemp("", "syscheck-squashfs-") if err != nil { return err } defer os.Remove(tmpSquashfsFile.Name()) - tmpMountDir, err := ioutil.TempDir("", "syscheck-mountpoint-") + tmpMountDir, err := os.MkdirTemp("", "syscheck-mountpoint-") if err != nil { return err } @@ -127,7 +126,7 @@ }() // syscheck check the - content, err := ioutil.ReadFile(filepath.Join(tmpMountDir, "canary.txt")) + content, err := os.ReadFile(filepath.Join(tmpMountDir, "canary.txt")) if err != nil { return fmt.Errorf("squashfs mount returned no err but canary file cannot be read") } diff -Nru snapd-2.62+23.10/syscheck/version.go snapd-2.63+23.10/syscheck/version.go --- snapd-2.62+23.10/syscheck/version.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/syscheck/version.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "bytes" "fmt" - "io/ioutil" + "os" "path/filepath" "strings" @@ -41,7 +41,7 @@ // and set to proper value func supportsMayDetachMounts(kver string) error { p := filepath.Join(dirs.GlobalRootDir, "/proc/sys/fs/may_detach_mounts") - 
value, err := ioutil.ReadFile(p) + value, err := os.ReadFile(p) if err != nil { return fmt.Errorf("cannot read the value of fs.may_detach_mounts kernel parameter: %v", err) } diff -Nru snapd-2.62+23.10/sysconfig/cloudinit.go snapd-2.63+23.10/sysconfig/cloudinit.go --- snapd-2.62+23.10/sysconfig/cloudinit.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/sysconfig/cloudinit.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "encoding/json" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -169,14 +168,14 @@ // intersect with what we support dstFileName := filepath.Base(in) - filteredFile, err := ioutil.TempFile("", dstFileName) + filteredFile, err := os.CreateTemp("", dstFileName) if err != nil { return "", err } defer filteredFile.Close() // open the source and unmarshal it as yaml - unfilteredFileBytes, err := ioutil.ReadFile(in) + unfilteredFileBytes, err := os.ReadFile(in) if err != nil { return "", err } @@ -238,7 +237,7 @@ // TODO: are there other keys in addition to those that we support in // filtering that might mention datasources ? - b, err := ioutil.ReadFile(configFile) + b, err := os.ReadFile(configFile) if err != nil { return nil, err } diff -Nru snapd-2.62+23.10/sysconfig/cloudinit_test.go snapd-2.63+23.10/sysconfig/cloudinit_test.go --- snapd-2.62+23.10/sysconfig/cloudinit_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/sysconfig/cloudinit_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "fmt" - "io/ioutil" "os" "path/filepath" "testing" @@ -1445,7 +1444,7 @@ } // otherwise we have expected output in the file - b, err := ioutil.ReadFile(out) + b, err := os.ReadFile(out) c.Assert(err, IsNil, comment) c.Assert(string(b), Equals, t.outStr, comment) } diff -Nru snapd-2.62+23.10/systemd/systemd.go snapd-2.63+23.10/systemd/systemd.go --- snapd-2.62+23.10/systemd/systemd.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/systemd/systemd.go 2024-04-24 00:00:39.000000000 +0000 @@ -355,6 +355,9 @@ // PreventRestartIfModified is set if we do not want to restart the // mount unit if even though it was modified PreventRestartIfModified bool + // StartBeforeDriversLoad is set if the unit is needed before + // udevd starts to run rules + StartBeforeDriversLoad bool } // Systemd exposes a minimal interface to manage systemd via the systemctl command. @@ -1395,11 +1398,14 @@ // Note that WantedBy=multi-user.target and Before=local-fs.target are // only used to allow downgrading to an older version of snapd. -const regularMountUnitTmpl = `[Unit] +// +// We want (see isBeforeDrivers) some snaps and components to be mounted before +// modules are loaded (that is before systemd-{udevd,modules-load}). +const snapMountUnitTmpl = `[Unit] Description={{.Description}} After=snapd.mounts-pre.target -Before=snapd.mounts.target -Before=local-fs.target +Before=snapd.mounts.target{{if isBeforeDrivers .MountUnitType}} +Before=systemd-udevd.service systemd-modules-load.service{{end}} [Mount] What={{.What}} @@ -1416,33 +1422,13 @@ {{- end}} ` -// We want kernel-modules components to be mounted before modules are -// loaded (that is before systemd-{udevd,modules-load}). 
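The unified snapMountUnitTmpl above gates the extra "Before=systemd-udevd.service systemd-modules-load.service" ordering on a template function instead of keeping a second, separate unit template. The following is a minimal, self-contained sketch of that text/template gating pattern only; the unitData type and its fields are hypothetical stand-ins (not snapd's MountUnitOptions) and the snippet merely illustrates the FuncMap-based conditional used here.

package main

import (
	"fmt"
	"os"
	"text/template"
)

// unitData is a hypothetical stand-in for the real mount-unit options; only
// the fields needed to demonstrate the conditional Before= ordering exist.
type unitData struct {
	Description   string
	BeforeDrivers bool // true for kernel-modules style mounts
}

// The extra Before= line is emitted only when the template helper returns
// true, mirroring the single-template approach with an isBeforeDrivers entry.
const unitTmpl = `[Unit]
Description={{.Description}}
After=snapd.mounts-pre.target
Before=snapd.mounts.target{{if isBeforeDrivers .}}
Before=systemd-udevd.service systemd-modules-load.service{{end}}
`

func main() {
	t := template.Must(template.New("unit").Funcs(template.FuncMap{
		"isBeforeDrivers": func(u unitData) bool { return u.BeforeDrivers },
	}).Parse(unitTmpl))

	// Only the second unit gains the additional ordering against
	// systemd-udevd.service and systemd-modules-load.service.
	for _, u := range []unitData{
		{Description: "regular snap mount"},
		{Description: "kernel-modules mount", BeforeDrivers: true},
	} {
		if err := t.Execute(os.Stdout, u); err != nil {
			panic(err)
		}
		fmt.Println()
	}
}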
-const beforeDriversLoadUnitTmpl = `[Unit] -Description={{.Description}} -DefaultDependencies=no -After=systemd-remount-fs.service -Before=sysinit.target -Before=systemd-udevd.service systemd-modules-load.service -Before=umount.target -Conflicts=umount.target - -[Mount] -What={{.What}} -Where={{.Where}} -Type={{.Fstype}} -Options={{join .Options ","}} - -[Install] -WantedBy=sysinit.target -{{- with .Origin}} -X-SnapdOrigin={{.}} -{{- end}} -` +func isBeforeDriversLoadMountUnit(mType MountUnitType) bool { + return mType == BeforeDriversLoadMountUnit +} -var templateFuncs = template.FuncMap{"join": strings.Join} -var parsedRegularMountUnitTmpl = template.Must(template.New("unit").Funcs(templateFuncs).Parse(regularMountUnitTmpl)) -var parsedKernelDriversMountUnitTmpl = template.Must(template.New("unit").Funcs(templateFuncs).Parse(beforeDriversLoadUnitTmpl)) +var templateFuncs = template.FuncMap{"join": strings.Join, + "isBeforeDrivers": isBeforeDriversLoadMountUnit} +var parsedMountUnitTmpl = template.Must(template.New("unit").Funcs(templateFuncs).Parse(snapMountUnitTmpl)) const ( snappyOriginModule = "X-SnapdOrigin" @@ -1455,18 +1441,7 @@ mu := MountUnitPathWithLifetime(u.Lifetime, u.Where) var unitContent bytes.Buffer - - var mntUnitTmpl *template.Template - switch u.MountUnitType { - case RegularMountUnit: - mntUnitTmpl = parsedRegularMountUnitTmpl - case BeforeDriversLoadMountUnit: - mntUnitTmpl = parsedKernelDriversMountUnitTmpl - default: - return "", mountUnchanged, fmt.Errorf("internal error: unknown mount unit type") - } - - if err := mntUnitTmpl.Execute(&unitContent, &u); err != nil { + if err := parsedMountUnitTmpl.Execute(&unitContent, &u); err != nil { return "", mountUnchanged, fmt.Errorf("cannot generate mount unit: %v", err) } @@ -1526,7 +1501,7 @@ options = append(options, "bind") hostFsType = "none" } - return s.EnsureMountUnitFileWithOptions(&MountUnitOptions{ + mountOptions := &MountUnitOptions{ Lifetime: Persistent, Description: description, What: what, @@ -1534,7 +1509,11 @@ Fstype: hostFsType, Options: options, PreventRestartIfModified: flags.PreventRestartIfModified, - }) + } + if flags.StartBeforeDriversLoad { + mountOptions.MountUnitType = BeforeDriversLoadMountUnit + } + return s.EnsureMountUnitFileWithOptions(mountOptions) } func (s *systemd) EnsureMountUnitFileWithOptions(unitOptions *MountUnitOptions) (string, error) { diff -Nru snapd-2.62+23.10/systemd/systemd_test.go snapd-2.63+23.10/systemd/systemd_test.go --- snapd-2.62+23.10/systemd/systemd_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/systemd/systemd_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -25,7 +25,6 @@ "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strconv" @@ -186,7 +185,7 @@ return nil, err } - return ioutil.NopCloser(bytes.NewReader(out)), err + return io.NopCloser(bytes.NewReader(out)), err } func (s *SystemdTestSuite) TestMockVersion(c *C) { @@ -945,7 +944,7 @@ reader, err := New(SystemMode, s.rep).LogReader([]string{"foo"}, 24, false, false) c.Check(err, IsNil) - logs, err := ioutil.ReadAll(reader) + logs, err := io.ReadAll(reader) c.Assert(err, IsNil) c.Check(string(logs), Equals, expected) c.Check(s.jns, DeepEquals, []string{"24"}) @@ -1169,7 +1168,6 @@ Description=Mount unit for foo, revision 42 After=snapd.mounts-pre.target Before=snapd.mounts.target -Before=local-fs.target [Mount] What=%s @@ -1207,7 +1205,6 @@ Description=Mount unit for foo, revision 42 After=snapd.mounts-pre.target Before=snapd.mounts.target -Before=local-fs.target [Mount] What=%s @@ -1232,7 
+1229,6 @@ Description=Mount unit for foo, revision 42 After=snapd.mounts-pre.target Before=snapd.mounts.target -Before=local-fs.target [Mount] What=%s @@ -1301,7 +1297,6 @@ Description=Mount unit for foo, revision 42 After=snapd.mounts-pre.target Before=snapd.mounts.target -Before=local-fs.target [Mount] What=%s @@ -1340,7 +1335,6 @@ Description=Mount unit for foodir, revision x1 After=snapd.mounts-pre.target Before=snapd.mounts.target -Before=local-fs.target [Mount] What=%s @@ -1361,6 +1355,43 @@ }) } +func (s *SystemdTestSuite) TestAddMountUnitStartBeforeDriversLoad(c *C) { + restore := squashfs.MockNeedsFuse(false) + defer restore() + + mockSnapPath := filepath.Join(c.MkDir(), "/var/lib/snappy/snaps/foo_1.0.snap") + makeMockFile(c, mockSnapPath) + + mountUnitName, err := New(SystemMode, nil).EnsureMountUnitFile("Mount unit for foo, revision x1", mockSnapPath, "/snap/snapname/x1", "squashfs", systemd.EnsureMountUnitFlags{StartBeforeDriversLoad: true}) + c.Assert(err, IsNil) + defer os.Remove(mountUnitName) + + c.Assert(filepath.Join(dirs.SnapServicesDir, mountUnitName), testutil.FileEquals, fmt.Sprintf(` +[Unit] +Description=Mount unit for foo, revision x1 +After=snapd.mounts-pre.target +Before=snapd.mounts.target +Before=systemd-udevd.service systemd-modules-load.service + +[Mount] +What=%s +Where=/snap/snapname/x1 +Type=squashfs +Options=nodev,ro,x-gdu.hide,x-gvfs-hide +LazyUnmount=yes + +[Install] +WantedBy=snapd.mounts.target +WantedBy=multi-user.target +`[1:], mockSnapPath)) + + c.Assert(s.argses, DeepEquals, [][]string{ + {"daemon-reload"}, + {"--no-reload", "enable", "snap-snapname-x1.mount"}, + {"restart", "snap-snapname-x1.mount"}, + }) +} + func (s *SystemdTestSuite) TestAddMountUnitTransient(c *C) { rootDir := dirs.GlobalRootDir @@ -1388,7 +1419,6 @@ Description=Mount unit for foo via bar After=snapd.mounts-pre.target Before=snapd.mounts.target -Before=local-fs.target [Mount] What=%s @@ -1435,21 +1465,20 @@ c.Assert(filepath.Join(dirs.SnapServicesDir, mountUnitName), testutil.FileEquals, fmt.Sprintf(`[Unit] Description=Mount unit for wifi kernel modules component -DefaultDependencies=no -After=systemd-remount-fs.service -Before=sysinit.target +After=snapd.mounts-pre.target +Before=snapd.mounts.target Before=systemd-udevd.service systemd-modules-load.service -Before=umount.target -Conflicts=umount.target [Mount] What=%s Where=/run/mnt/kernel-modules/5.15.0-91-generic/mykmod/ Type=squashfs Options=nodev,ro,x-gdu.hide,x-gvfs-hide +LazyUnmount=yes [Install] -WantedBy=sysinit.target +WantedBy=snapd.mounts.target +WantedBy=multi-user.target `, mockSnapPath)) escapedUnit := "run-mnt-kernel\\x2dmodules-5.15.0\\x2d91\\x2dgeneric-mykmod.mount" c.Assert(s.argses, DeepEquals, [][]string{ @@ -1483,21 +1512,20 @@ c.Assert(filepath.Join(dirs.SnapServicesDir, mountUnitName), testutil.FileEquals, fmt.Sprintf(`[Unit] Description=Mount unit for kernel modules in kernel tree -DefaultDependencies=no -After=systemd-remount-fs.service -Before=sysinit.target +After=snapd.mounts-pre.target +Before=snapd.mounts.target Before=systemd-udevd.service systemd-modules-load.service -Before=umount.target -Conflicts=umount.target [Mount] What=/run/mnt/kernel-modules/5.15.0-91-generic/mykmod/modules/5.15.0-91-generic Where=/usr/lib/modules/5.15.0-91-generic/updates/mykmod/ Type=none Options=bind +LazyUnmount=yes [Install] -WantedBy=sysinit.target +WantedBy=snapd.mounts.target +WantedBy=multi-user.target `)) escapedUnit := "usr-lib-modules-5.15.0\\x2d91\\x2dgeneric-updates-mykmod.mount" c.Assert(s.argses, 
DeepEquals, [][]string{ @@ -1530,7 +1558,6 @@ Description=Mount unit for foo, revision 42 After=snapd.mounts-pre.target Before=snapd.mounts.target -Before=local-fs.target [Mount] What=%s @@ -1576,7 +1603,6 @@ Description=Mount unit for foo, revision x1 After=snapd.mounts-pre.target Before=snapd.mounts.target -Before=local-fs.target [Mount] What=%s @@ -1618,7 +1644,6 @@ Description=Mount unit for foo, revision x1 After=snapd.mounts-pre.target Before=snapd.mounts.target -Before=local-fs.target [Mount] What=%s @@ -1884,7 +1909,6 @@ Description=Mount unit for foo, revision 42 After=snapd.mounts-pre.target Before=snapd.mounts.target -Before=local-fs.target [Mount] What=%s @@ -1939,7 +1963,6 @@ Description=Mount unit for foo, revision 42 After=snapd.mounts-pre.target Before=snapd.mounts.target -Before=local-fs.target [Mount] What=%s @@ -1984,7 +2007,6 @@ Description=Mount unit for foo, revision 42 After=snapd.mounts-pre.target Before=snapd.mounts.target -Before=local-fs.target [Mount] What=%s @@ -2061,21 +2083,20 @@ c.Check(filepath.Join(dirs.SnapServicesDir, mountUnitName), testutil.FileEquals, fmt.Sprintf(`[Unit] Description=Early mount unit for kernel snap -DefaultDependencies=no -After=systemd-remount-fs.service -Before=sysinit.target +After=snapd.mounts-pre.target +Before=snapd.mounts.target Before=systemd-udevd.service systemd-modules-load.service -Before=umount.target -Conflicts=umount.target [Mount] What=%s Where=/run/mnt/kernel-snaps/pc-kernel/1 Type=squashfs Options=nodev,ro,x-gdu.hide,x-gvfs-hide +LazyUnmount=yes [Install] -WantedBy=sysinit.target +WantedBy=snapd.mounts.target +WantedBy=multi-user.target `, mockSnapPath)) } @@ -2223,7 +2244,7 @@ } func (s *SystemdTestSuite) TestListMountUnitsHappy(c *C) { - tmpDir, err := ioutil.TempDir("/tmp", "snapd-systemd-test-list-mounts-*") + tmpDir, err := os.MkdirTemp("/tmp", "snapd-systemd-test-list-mounts-*") c.Assert(err, IsNil) defer os.RemoveAll(tmpDir) diff -Nru snapd-2.62+23.10/tests/bin/tests.invariant snapd-2.63+23.10/tests/bin/tests.invariant --- snapd-2.62+23.10/tests/bin/tests.invariant 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/bin/tests.invariant 2024-04-24 00:00:39.000000000 +0000 @@ -199,6 +199,14 @@ fi } +check_fakestore_cleaned() { + # Check if fakestore was properly cleaned to avoid leaking into other tests. 
+ if [ -f "/etc/systemd/system/snapd.service.d/store.conf" ]; then + echo "/etc/systemd/system/snapd.service.d/store.conf was not cleaned properly" + exit 1 + fi +} + check_invariant() { case "$1" in root-files-in-home) @@ -225,6 +233,9 @@ segmentation-violations) check_segmentation_violations "$1" ;; + check-fakestore-cleaned) + check_fakestore_cleaned + ;; *) echo "tests.invariant: unknown invariant $1" >&2 exit 1 @@ -233,7 +244,7 @@ } main() { - ALL_INVARIANTS="root-files-in-home crashed-snap-confine lxcfs-mounted stray-dbus-daemon leftover-defer-sh broken-snaps cgroup-scopes segmentation-violations" + ALL_INVARIANTS="root-files-in-home crashed-snap-confine lxcfs-mounted stray-dbus-daemon leftover-defer-sh broken-snaps cgroup-scopes segmentation-violations check-fakestore-cleaned" case "$action" in check) diff -Nru snapd-2.62+23.10/tests/core/apt/task.yaml snapd-2.63+23.10/tests/core/apt/task.yaml --- snapd-2.62+23.10/tests/core/apt/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/apt/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,9 @@ summary: Ensure that the core systems have a special apt placeholder +details: | + Check apt command is not available in uc20+ and there is a fake apt-get script + for uc16 and uc18. + systems: [ubuntu-core-*] execute: | diff -Nru snapd-2.62+23.10/tests/core/backlight/task.yaml snapd-2.63+23.10/tests/core/backlight/task.yaml --- snapd-2.62+23.10/tests/core/backlight/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/backlight/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,9 @@ summary: Test system.disable-backlight-service core config option +details: | + Check the backlight service can be enabled and disabled through + system.disable-backlight-service core config option + environment: MASKFILE: /etc/systemd/system/systemd-backlight@.service diff -Nru snapd-2.62+23.10/tests/core/bash-completion/task.yaml snapd-2.63+23.10/tests/core/bash-completion/task.yaml --- snapd-2.62+23.10/tests/core/bash-completion/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/bash-completion/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,8 @@ summary: bash completion +details: | + Check the bash completion works in ubuntu core 22+ + systems: - ubuntu-core-22-* diff -Nru snapd-2.62+23.10/tests/core/basic18/task.yaml snapd-2.63+23.10/tests/core/basic18/task.yaml --- snapd-2.62+23.10/tests/core/basic18/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/basic18/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,9 @@ summary: Check basic core18 system functionality +details: | + Validate basic functionalities are working for uc18. Check commands + like: list, changes and install. And run a basic shell snap. + systems: [ubuntu-core-18-*] execute: | diff -Nru snapd-2.62+23.10/tests/core/classic-snap16/task.yaml snapd-2.63+23.10/tests/core/classic-snap16/task.yaml --- snapd-2.62+23.10/tests/core/classic-snap16/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/classic-snap16/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,10 @@ summary: Ensure classic dimension works correctly +details: | + Validates classic snap can be installed in uc16. Check that classic can + run commands inside classic and sudo works without a password inside + classic. 
+ # classic snap is not maintained for UC18+, for these releases the lxd snaps is # recommended instead of the classic snap systems: [ubuntu-core-16-*] diff -Nru snapd-2.62+23.10/tests/core/compat/task.yaml snapd-2.63+23.10/tests/core/compat/task.yaml --- snapd-2.62+23.10/tests/core/compat/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/compat/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,10 @@ summary: Ensure that core(16) compatibility is there +details: | + Ensure that when snap test-snapd-sh is installed, the core + snap is pulled in, and check that test-snapd-sh sees the core16 + environment. + execute: | echo "Install test-snapd-sh (which uses the core snap)" snap install test-snapd-sh diff -Nru snapd-2.62+23.10/tests/core/config-defaults-once/task.yaml snapd-2.63+23.10/tests/core/config-defaults-once/task.yaml --- snapd-2.62+23.10/tests/core/config-defaults-once/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/config-defaults-once/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,6 +1,11 @@ summary: | Test that configuration defaults are only applied once. +details: | + Checks that configuration defaults are only applied once, and + those are not applied when a snap is installed, which could + trigger system defaults to be reapplied. + # it is not yet possible to install snapd on UC16 # TODO:UC20: enable for UC20, currently fails because there is no seed.yaml in # the same place as UC18 diff -Nru snapd-2.62+23.10/tests/core/core-dump/task.yaml snapd-2.63+23.10/tests/core/core-dump/task.yaml --- snapd-2.62+23.10/tests/core/core-dump/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/core-dump/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,9 @@ summary: Make sure we can generate core dumps on UC +details: | + Check it is possible to generate core dumps on UC. Make sure that dumps + are produced in little time in a place writable from snap context. + execute: | # To get VERSION_ID defined . /etc/os-release diff -Nru snapd-2.62+23.10/tests/core/core-to-snapd-failover16/task.yaml snapd-2.63+23.10/tests/core/core-to-snapd-failover16/task.yaml --- snapd-2.62+23.10/tests/core/core-to-snapd-failover16/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/core-to-snapd-failover16/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,11 @@ summary: Test the failover scenario of the snapd snap installation on a UC16 system +details: | + Check the failover scenario of the snapd snap installation by installing it + multiple times to ensure that we can revert back to the core snap as many times + as needed. Verify that snapd.failure was activated when we tried to install + a broken snapd. + # snapd snap is already installed by default on uc18+ systems: [ubuntu-core-16-*] diff -Nru snapd-2.62+23.10/tests/core/create-user/task.yaml snapd-2.63+23.10/tests/core/create-user/task.yaml --- snapd-2.62+23.10/tests/core/create-user/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/create-user/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,6 +1,10 @@ summary: Ensure that snap create-user works in ubuntu-core # FIXME: combine this test with tests/core/create-user-2? +details: | + Check that the command `snap create-user` works in ubuntu core. + Ensure that the keys are imported for the user and it can be created as a sudoer. 
+ environment: USER_EMAIL: mvo@ubuntu.com USER_NAME: mvo diff -Nru snapd-2.62+23.10/tests/core/create-user-2/task.yaml snapd-2.63+23.10/tests/core/create-user-2/task.yaml --- snapd-2.62+23.10/tests/core/create-user-2/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/create-user-2/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,9 @@ summary: Ensure create-user functionality +details: | + Check the command `snap create-user` succeeds when run as non-root user with sudo. + Ensure the ssh keys are imported for the user and that it can be created as a sudoer. + environment: USER_EMAIL: mvo@ubuntu.com USER_NAME: mvo diff -Nru snapd-2.62+23.10/tests/core/custom-device-reg/task.yaml snapd-2.63+23.10/tests/core/custom-device-reg/task.yaml --- snapd-2.62+23.10/tests/core/custom-device-reg/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/custom-device-reg/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,7 @@ -summary: | - Test that device initialisation and registration can be customized +summary: Test the customized device registration through a gadget hook + +details: | + Check that device initialisation and registration can be customized with the prepare-device gadget hook # TODO:UC20: enable for UC20, it assumes /var/lib/snapd/seed/assertions/model diff -Nru snapd-2.62+23.10/tests/core/custom-device-reg-extras/task.yaml snapd-2.63+23.10/tests/core/custom-device-reg-extras/task.yaml --- snapd-2.62+23.10/tests/core/custom-device-reg-extras/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/custom-device-reg-extras/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,4 +1,6 @@ -summary: | +summary: Test the customized device registration through a gadget hook + +details: | Test that device initialisation and registration can be customized with the prepare-device gadget hook and this can set request headers, a proposed serial and the body of the serial assertion diff -Nru snapd-2.62+23.10/tests/core/dbus-activation/task.yaml snapd-2.63+23.10/tests/core/dbus-activation/task.yaml --- snapd-2.62+23.10/tests/core/dbus-activation/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/dbus-activation/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,8 @@ summary: Ensure that service activation files are installed on Core systems +details: | + Check that D-Bus daemon configuration files are present on Ubuntu Core systems + execute: | echo "D-Bus daemon configuration files are present on Ubuntu Core systems" test -f /etc/dbus-1/system.d/snapd.system-services.conf -o \ diff -Nru snapd-2.62+23.10/tests/core/desktop-files/task.yaml snapd-2.63+23.10/tests/core/desktop-files/task.yaml --- snapd-2.62+23.10/tests/core/desktop-files/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/desktop-files/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,8 @@ summary: Ensure that desktop files installed on Core systems +details: | + Check that desktop files are present on Ubuntu Core systems + execute: | echo "Desktop files are present on Ubuntu Core systems" test -f /var/lib/snapd/desktop/applications/io.snapcraft.SessionAgent.desktop -o \ diff -Nru snapd-2.62+23.10/tests/core/device-reg/task.yaml snapd-2.63+23.10/tests/core/device-reg/task.yaml --- snapd-2.62+23.10/tests/core/device-reg/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/device-reg/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -2,6 +2,9 @@ Ensure after device 
initialisation registration worked and we have a serial and can acquire a session macaroon +details: | + Check that initialised devices have a serial and a session macaroon + execute: | #shellcheck source=tests/lib/core-config.sh . "$TESTSLIB"/core-config.sh diff -Nru snapd-2.62+23.10/tests/core/fan/task.yaml snapd-2.63+23.10/tests/core/fan/task.yaml --- snapd-2.62+23.10/tests/core/fan/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/fan/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,8 @@ summary: Test ubuntu-fan +details: | + Check that fanctl exists in UC16 and it can create a fan bridge + # Ubuntu fan not available on ubuntu-core-18 and later, it is not shipped in the # base snap systems: [ubuntu-core-16-*] diff -Nru snapd-2.62+23.10/tests/core/fsck-on-boot/task.yaml snapd-2.63+23.10/tests/core/fsck-on-boot/task.yaml --- snapd-2.62+23.10/tests/core/fsck-on-boot/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/fsck-on-boot/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -29,7 +29,7 @@ umount /boot/efi umount /boot/grub fi - elif os.query is-core20 || os.query is-core22; then + elif os.query is-core-ge 20; then # TODO:UC20 The property of having to keep a mounted vfat at all time # is not the most fortunate. Any power loss will result in a dirty # filesystem. Could ubuntu-seed be re-mounted read-only at some point @@ -65,9 +65,9 @@ fi } - if os.query is-core16 || os.query is-core18; then + if os.query is-core-le 18; then LABEL=system-boot - elif os.query is-core20 || os.query is-core22; then + elif os.query is-core-ge 20; then LABEL=ubuntu-seed else echo "unknown core system, please update test" diff -Nru snapd-2.62+23.10/tests/core/gadget-config-defaults-to-snaps/task.yaml snapd-2.63+23.10/tests/core/gadget-config-defaults-to-snaps/task.yaml --- snapd-2.62+23.10/tests/core/gadget-config-defaults-to-snaps/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/gadget-config-defaults-to-snaps/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,9 @@ -summary: | - Test that config defaults specified in the gadget are picked up - for first boot snaps with either configure hook only, or combination - of default-configure and configure hooks +summary: Test that config defaults are applied correctly + +details: | + Test that config defaults specified in the gadget are picked up + for first boot snaps with either configure hook only, or combination + of default-configure and configure hooks # the test is only meaningful on core devices # TODO:UC20: enable for UC20, it assumes /var/lib/snapd/seed/assertions/model diff -Nru snapd-2.62+23.10/tests/core/gadget-config-defaults-vitality/task.yaml snapd-2.63+23.10/tests/core/gadget-config-defaults-vitality/task.yaml --- snapd-2.62+23.10/tests/core/gadget-config-defaults-vitality/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/gadget-config-defaults-vitality/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -2,6 +2,10 @@ Test that vitality defaults specified in the gadget are picked up for first boot snaps +details: | + Check that vitality defaults specified in the gadget are applied + properly for first boot snaps. 
+ # the test is only meaningful on core devices # TODO:UC20: enable for UC20, it assumes /var/lib/snapd/seed/assertions/model # which we don't have currently diff -Nru snapd-2.62+23.10/tests/core/gadget-kernel-refs-update-pc/task.yaml snapd-2.63+23.10/tests/core/gadget-kernel-refs-update-pc/task.yaml --- snapd-2.62+23.10/tests/core/gadget-kernel-refs-update-pc/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/gadget-kernel-refs-update-pc/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,9 @@ summary: Exercise a gadget update with kernel refs on a PC +details: | + Check that it is possible to revert a kernel snap when kernel gadgets + are installed. + # TODO:UC20: once LP: #1907056 is fixed and we have an updated # pi gadget and pi-kernel snap this test should be # replaced with a pi-only test. This test is artificial diff -Nru snapd-2.62+23.10/tests/core/generic-device-reg/task.yaml snapd-2.63+23.10/tests/core/generic-device-reg/task.yaml --- snapd-2.62+23.10/tests/core/generic-device-reg/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/generic-device-reg/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -2,6 +2,10 @@ Test that device initialisation and registration work with the model specifying serial-authority: [generic] +details: | + Check that a device is initialized and registered properly when a + generic serial-authority is used in its model. + # TODO:UC20: enable for UC20, it assumes /var/lib/snapd/seed/assertions/model # which we don't have currently systems: [ubuntu-core-18*] diff -Nru snapd-2.62+23.10/tests/core/grub-no-unpacked-assets/task.yaml snapd-2.63+23.10/tests/core/grub-no-unpacked-assets/task.yaml --- snapd-2.62+23.10/tests/core/grub-no-unpacked-assets/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/core/grub-no-unpacked-assets/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,9 @@ summary: Ensure we have no unpacked kernel.img/initrd.img on grub systems +details: | + Check that vmlinuz, kernel.img and initrd.img are not found in /boot/grub + in Ubuntu Core systems + systems: [ubuntu-core-*-64] environment: diff -Nru snapd-2.62+23.10/tests/lib/assertions/ubuntu-core-24-amd64.model snapd-2.63+23.10/tests/lib/assertions/ubuntu-core-24-amd64.model --- snapd-2.62+23.10/tests/lib/assertions/ubuntu-core-24-amd64.model 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/assertions/ubuntu-core-24-amd64.model 2024-04-24 00:00:39.000000000 +0000 @@ -1,7 +1,8 @@ type: model -authority-id: pxF0fchmU682PEmmay31jjyCHgvscRcj +authority-id: canonical +revision: 2 series: 16 -brand-id: pxF0fchmU682PEmmay31jjyCHgvscRcj +brand-id: canonical model: ubuntu-core-24-amd64-dangerous architecture: amd64 base: core24 @@ -13,7 +14,7 @@ name: pc type: gadget - - default-channel: 24/edge + default-channel: 24/beta id: pYVQrBcKmBa0mZ4CCN7ExT6jH8rY1hza name: pc-kernel type: kernel @@ -27,16 +28,22 @@ id: PMrrV4ml8uWuEUDBT8dSGnKUYbevVhc4 name: snapd type: snapd -timestamp: 2023-11-09T12:57:29+00:00 -sign-key-sha3-384: Z79zvem9vfFnGDf5ujLe9x4Ztdf1mwOOoxSkY1WWv3cSDh_cyFrRkmFGVaqs8rWc + - + default-channel: 24/edge + id: ASctKBEHzVt3f1pbZLoekCvcigRjtuqw + name: console-conf + presence: optional + type: app +timestamp: 2024-03-12T08:42:32+00:00 +sign-key-sha3-384: 9tydnLa6MTJ-jaQTFUXEwHl1yRx7ZS4K5cyFDhYDcPzhS7uyEkDxdUjg9g08BtNn -AcLBcwQAAQoAHRYhBMnGmNEOGnW9TKVit38uBaVTGCUfBQJlTOv0AAoJEH8uBaVTGCUf5AkQAKkW -O5fUF1F/XPRfE+t8FoIIq3eeOolwCxt5XD2LjJ7oLNYh3YfqXIvitA1lSo3KxT0zIJB9cgUx5ITN 
-RMVvRP9jkcFC0K2sUcoEbJAkdXlWpvdWWjaQbmZ+9cVEUQF9QZmF4MMOxPpFs8lDrcc7QfGw+g/7 -xeomXygiC3yAmBrDzfqojJnIPb80u/1qWrSa8DCr09xELK7m30kVwlk9o865nyDQsCh/5RT63/Ja -7hJEKgihf4VAzOgfEM8tYQ5jFmhvwC3/uaSonoge6/fJBQI+pKKswjxfmQH73xxX9vifkbbX/atj -e359+YPAzntFuMvoNBA3y3JXaWpOzwZzKsr2csnQzFlcpbwjzrLPAGpkuWbFXlMbX8VyUHZHfMuj -p/Ro1A7S3OQmdm0dL8L8yzQhrgvA2bPnPUSvgyK4lnzBFwkCzbw93AG9JMtIx0nLiNRgdBeavxaL -EH42gqVLt22F5oFCfvcv1IMJ92mjJ9Txs4ubFtk9NYoW7hBqxnZoBEZTEAg+DRSF9RAg1c+nhwhi -hDMuRfjkgvciJYOx6hhYWenHZYqeyHcIDcSWAtKZWS2W81kyIYBo4yT5PaA0+7T727WX6N0zjYFM -/k8a4NipctTZb9UDIqL9U0zYZmGnhmsixclqm0EwFqgO7l2+6jwcsT8EgFGVJ672DirmyKZs \ No newline at end of file +AcLBXAQAAQoABgUCZfBvFgAKCRDgT5vottzAEkUZD/9q2UjBGOMNUaAOqpSxtAwgKGtt1uVYE74d +5U3V+gqHC5x6eSdXkm6DCbHPp54Lxz3f4so2Epp8lYGyrtUPJdXmP57w49BvMZfItudSto8IPkdZ +ogYYZQfaV5L0JUL+OEdrOlbuUEWkHAbqxKrFHlv5c3VwlzaplQixTenyvfAxsERJSgrRUaz1FuL/ +AuoWkz4hpvDe1JV+mLyHmaqea8U9g+H7gd5x5pSI/f6S2t1Ercds3fDe8Ot92E2vsi5PhOC/z5mn +MS2Lv7KYKXMIMvOfh2GzV2cv2ZPjPv/D8lJ/y4BCl4N1iUmUb52fW6m3Whdi/LqDM0VkSvJXWX23 +H+szcnq09EM4ajcuXxUGVl9Z0QeQG9goKFRqF0lTfMo2EIGkOvWbO077SjzWWpFfr7GtB74J9xu3 +RgxDPmk0HEdYy4u7jUAiOpIMytmKiCor7hXCnMUvadP//slpnH2Pi5nqq0bm32nID6JjBjYO3IrL +TuuSaKbjGpyHIGg/dUBZltHtsgGgRxF7CFrtzxhGjdIg8tllG6nMdPm9G3Vsic9zuBG8uLAOX7Dr +RvT5/ylHYnbE68Y6TQJGhfH1qYpbqnG3QTJX8bDDHJ4nBKyNhBdrZQ2wOvBOLnyN9npaJO83qpF6 +Xv/6BUbwNtOnPL3ovnBLe1RJ358a7TcUtfuES1XlyQ== \ No newline at end of file diff -Nru snapd-2.62+23.10/tests/lib/external/snapd-testing-tools/tests/os.query/task.yaml snapd-2.63+23.10/tests/lib/external/snapd-testing-tools/tests/os.query/task.yaml --- snapd-2.62+23.10/tests/lib/external/snapd-testing-tools/tests/os.query/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/external/snapd-testing-tools/tests/os.query/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -60,6 +60,8 @@ ! os.query is-debian os.query is-classic ! os.query is-core + ! os.query is-core-ge 20 + ! os.query is-core-le 20 os.query is-pc-amd64 ! os.query is-arm ;; @@ -69,6 +71,10 @@ os.query is-ubuntu 22.04 ! os.query is-ubuntu 20.04 ! os.query is-core + ! os.query is-core-ge 20 + ! os.query is-core-le 20 + ! os.query is-core-le 24 + ! 
os.query is-core-ge 24 ;; ubuntu-23.10-64) os.query is-classic diff -Nru snapd-2.62+23.10/tests/lib/external/snapd-testing-tools/tools/os.query snapd-2.63+23.10/tests/lib/external/snapd-testing-tools/tools/os.query --- snapd-2.62+23.10/tests/lib/external/snapd-testing-tools/tools/os.query 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/external/snapd-testing-tools/tools/os.query 2024-04-24 00:00:39.000000000 +0000 @@ -13,63 +13,27 @@ } is_core() { - # We need to check $SPREAD_SYSTEM var because in snapd the os-release file does - # not contain the ubuntu-core info while the system is being prepared - if [ -n "$SPREAD_SYSTEM" ]; then - [[ "$SPREAD_SYSTEM" == ubuntu-core-* ]] - else - grep -qFx 'ID=ubuntu-core' /etc/os-release - fi + grep -qFx 'ID=ubuntu-core' /etc/os-release } is_core16() { - # We need to check $SPREAD_SYSTEM var because in snapd the os-release file does - # not contain the ubuntu-core info while the system is being prepared - if [ -n "$SPREAD_SYSTEM" ]; then - [[ "$SPREAD_SYSTEM" == ubuntu-core-16-* ]] - else - grep -qFx 'ID=ubuntu-core' /etc/os-release && grep -qFx 'VERSION_ID="16"' /etc/os-release - fi + grep -qFx 'ID=ubuntu-core' /etc/os-release && grep -qFx 'VERSION_ID="16"' /etc/os-release } is_core18() { - # We need to check $SPREAD_SYSTEM var because in snapd the os-release file does - # not contain the ubuntu-core info while the system is being prepared - if [ -n "$SPREAD_SYSTEM" ]; then - [[ "$SPREAD_SYSTEM" == ubuntu-core-18-* ]] - else - grep -qFx 'ID=ubuntu-core' /etc/os-release && grep -qFx 'VERSION_ID="18"' /etc/os-release - fi + grep -qFx 'ID=ubuntu-core' /etc/os-release && grep -qFx 'VERSION_ID="18"' /etc/os-release } is_core20() { - # We need to check $SPREAD_SYSTEM var because in snapd the os-release file does - # not contain the ubuntu-core info while the system is being prepared - if [ -n "$SPREAD_SYSTEM" ]; then - [[ "$SPREAD_SYSTEM" == ubuntu-core-20-* ]] - else - grep -qFx 'ID=ubuntu-core' /etc/os-release && grep -qFx 'VERSION_ID="20"' /etc/os-release - fi + grep -qFx 'ID=ubuntu-core' /etc/os-release && grep -qFx 'VERSION_ID="20"' /etc/os-release } is_core22() { - # We need to check $SPREAD_SYSTEM var because in snapd the os-release file does - # not contain the ubuntu-core info while the system is being prepared - if [ -n "$SPREAD_SYSTEM" ]; then - [[ "$SPREAD_SYSTEM" == ubuntu-core-22-* ]] - else - grep -qFx 'ID=ubuntu-core' /etc/os-release && grep -qFx 'VERSION_ID="22"' /etc/os-release - fi + grep -qFx 'ID=ubuntu-core' /etc/os-release && grep -qFx 'VERSION_ID="22"' /etc/os-release } is_core24() { - # We need to check $SPREAD_SYSTEM var because in snapd the os-release file does - # not contain the ubuntu-core info while the system is being prepared - if [ -n "$SPREAD_SYSTEM" ]; then - [[ "$SPREAD_SYSTEM" == ubuntu-core-24-* ]] - else - grep -qFx 'ID=ubuntu-core' /etc/os-release && grep -qFx 'VERSION_ID="24"' /etc/os-release - fi + grep -qFx 'ID=ubuntu-core' /etc/os-release && grep -qFx 'VERSION_ID="24"' /etc/os-release } is_core_gt() { @@ -79,11 +43,7 @@ exit 1 fi - # While ubuntu classic image is used to build a core image - if ! grep -qFx 'ID=ubuntu-core' /etc/os-release; then - VERSION="${VERSION}.04" - fi - compare_ubuntu "$VERSION" "-gt" + is_core && compare_ubuntu "$VERSION" "-gt" } is_core_ge() { @@ -93,11 +53,7 @@ exit 1 fi - # While ubuntu classic image is used to build a core image - if ! 
grep -qFx 'ID=ubuntu-core' /etc/os-release; then - VERSION="${VERSION}.04" - fi - compare_ubuntu "$VERSION" "-ge" + is_core && compare_ubuntu "$VERSION" "-ge" } is_core_lt() { @@ -107,11 +63,7 @@ exit 1 fi - # While ubuntu classic image is used to build a core image - if ! grep -qFx 'ID=ubuntu-core' /etc/os-release; then - VERSION="${VERSION}.04" - fi - compare_ubuntu "$VERSION" "-lt" + is_core && compare_ubuntu "$VERSION" "-lt" } is_core_le() { @@ -121,11 +73,7 @@ exit 1 fi - # This case is when ubuntu classic image is used to build a core image - if ! grep -qFx 'ID=ubuntu-core' /etc/os-release; then - VERSION="${VERSION}.04" - fi - compare_ubuntu "$VERSION" "-le" + is_core && compare_ubuntu "$VERSION" "-le" } is_classic() { @@ -166,19 +114,19 @@ } is_ubuntu_gt() { - compare_ubuntu "${1:-}" "-gt" + is_classic && compare_ubuntu "${1:-}" "-gt" } is_ubuntu_ge() { - compare_ubuntu "${1:-}" "-ge" + is_classic && compare_ubuntu "${1:-}" "-ge" } is_ubuntu_lt() { - compare_ubuntu "${1:-}" "-lt" + is_classic && compare_ubuntu "${1:-}" "-lt" } is_ubuntu_le() { - compare_ubuntu "${1:-}" "-le" + is_classic && compare_ubuntu "${1:-}" "-le" } compare_ubuntu() { diff -Nru snapd-2.62+23.10/tests/lib/external/snapd-testing-tools/utils/spread-shellcheck snapd-2.63+23.10/tests/lib/external/snapd-testing-tools/utils/spread-shellcheck --- snapd-2.62+23.10/tests/lib/external/snapd-testing-tools/utils/spread-shellcheck 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/external/snapd-testing-tools/utils/spread-shellcheck 2024-04-24 00:00:39.000000000 +0000 @@ -117,6 +117,16 @@ # shellcheck knows about that script_data = [] script_data.append('set -e') + script_data.append('export SPREAD_BACKEND=placeholder') + script_data.append('export SPREAD_BACKENDS=placeholder,list') + script_data.append('export SPREAD_SYSTEM=placeholder') + script_data.append('export SPREAD_JOB=placeholder') + script_data.append('export SPREAD_VARIANT=placeholder') + script_data.append('export SPREAD_PATH=placeholder') + script_data.append('export SPREAD_SYSTEM_USERNAME=placeholder') + script_data.append('export SPREAD_SYSTEM_PASSWORD=placeholder') + script_data.append('export SPREAD_SYSTEM_ADDRESS=placeholder') + script_data.append('export SPREAD_REBOOT=123') for key, value in env.items(): value = str(value) diff -Nru snapd-2.62+23.10/tests/lib/fakestore/cmd/fakestore/cmd_new_repair.go snapd-2.63+23.10/tests/lib/fakestore/cmd/fakestore/cmd_new_repair.go --- snapd-2.62+23.10/tests/lib/fakestore/cmd/fakestore/cmd_new_repair.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/fakestore/cmd/fakestore/cmd_new_repair.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "encoding/json" "fmt" - "io/ioutil" + "os" "github.com/snapcore/snapd/tests/lib/fakestore/refresh" ) @@ -39,7 +39,7 @@ func (x *cmdNewRepair) Execute(args []string) error { headers := map[string]interface{}{} if x.RepairJSON != "" { - content, err := ioutil.ReadFile(x.RepairJSON) + content, err := os.ReadFile(x.RepairJSON) if err != nil { return err } diff -Nru snapd-2.62+23.10/tests/lib/fakestore/cmd/fakestore/cmd_new_snap_decl.go snapd-2.63+23.10/tests/lib/fakestore/cmd/fakestore/cmd_new_snap_decl.go --- snapd-2.62+23.10/tests/lib/fakestore/cmd/fakestore/cmd_new_snap_decl.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/fakestore/cmd/fakestore/cmd_new_snap_decl.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "encoding/json" "fmt" - "io/ioutil" + "os" 
"github.com/snapcore/snapd/tests/lib/fakestore/refresh" ) @@ -39,7 +39,7 @@ func (x *cmdNewSnapDeclaration) Execute(args []string) error { headers := map[string]interface{}{} if x.SnapDeclJsonPath != "" { - content, err := ioutil.ReadFile(x.SnapDeclJsonPath) + content, err := os.ReadFile(x.SnapDeclJsonPath) if err != nil { return err } diff -Nru snapd-2.62+23.10/tests/lib/fakestore/cmd/fakestore/cmd_new_snap_rev.go snapd-2.63+23.10/tests/lib/fakestore/cmd/fakestore/cmd_new_snap_rev.go --- snapd-2.62+23.10/tests/lib/fakestore/cmd/fakestore/cmd_new_snap_rev.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/fakestore/cmd/fakestore/cmd_new_snap_rev.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "encoding/json" "fmt" - "io/ioutil" + "os" "github.com/snapcore/snapd/tests/lib/fakestore/refresh" ) @@ -39,7 +39,7 @@ func (x *cmdNewSnapRevision) Execute(args []string) error { headers := map[string]interface{}{} if x.SnapRevJsonPath != "" { - content, err := ioutil.ReadFile(x.SnapRevJsonPath) + content, err := os.ReadFile(x.SnapRevJsonPath) if err != nil { return err } diff -Nru snapd-2.62+23.10/tests/lib/fakestore/refresh/refresh.go snapd-2.63+23.10/tests/lib/fakestore/refresh/refresh.go --- snapd-2.62+23.10/tests/lib/fakestore/refresh/refresh.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/fakestore/refresh/refresh.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -114,7 +113,7 @@ func makeFakeRefreshForSnap(snap, targetDir, snapBlob, snapOrigBlob string, db *asserts.Database, f asserts.Fetcher) error { // make a fake update snap in /var/tmp (which is not a tempfs) - fakeUpdateDir, err := ioutil.TempDir("/var/tmp", "snap-build-") + fakeUpdateDir, err := os.MkdirTemp("/var/tmp", "snap-build-") if err != nil { return fmt.Errorf("creating tmp for fake update: %v", err) } diff -Nru snapd-2.62+23.10/tests/lib/fakestore/refresh/snap_asserts.go snapd-2.63+23.10/tests/lib/fakestore/refresh/snap_asserts.go --- snapd-2.62+23.10/tests/lib/fakestore/refresh/snap_asserts.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/fakestore/refresh/snap_asserts.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,7 @@ import ( "fmt" - "io/ioutil" + "os" "path/filepath" "strings" "time" @@ -115,7 +115,7 @@ } } - scriptBodyBytes, err := ioutil.ReadFile(scriptFilename) + scriptBodyBytes, err := os.ReadFile(scriptFilename) if err != nil { return "", err } diff -Nru snapd-2.62+23.10/tests/lib/fakestore/store/store.go snapd-2.63+23.10/tests/lib/fakestore/store/store.go --- snapd-2.62+23.10/tests/lib/fakestore/store/store.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/fakestore/store/store.go 2024-04-24 00:00:39.000000000 +0000 @@ -24,10 +24,10 @@ "encoding/json" "errors" "fmt" - "io/ioutil" "net" "net/http" "net/url" + "os" "path/filepath" "regexp" "strconv" @@ -561,7 +561,7 @@ } for _, fn := range aFiles { - b, err := ioutil.ReadFile(fn) + b, err := os.ReadFile(fn) if err != nil { return nil, err } diff -Nru snapd-2.62+23.10/tests/lib/fakestore/store/store_test.go snapd-2.63+23.10/tests/lib/fakestore/store/store_test.go --- snapd-2.62+23.10/tests/lib/fakestore/store/store_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/fakestore/store/store_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "bytes" "encoding/json" - "io/ioutil" + "io" "net/http" "os" "path/filepath" @@ -97,7 +97,7 @@ defer resp.Body.Close() 
c.Assert(resp.StatusCode, Equals, 418) - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) c.Assert(err, IsNil) c.Assert(string(body), Equals, "I'm a teapot") @@ -109,7 +109,7 @@ defer resp.Body.Close() c.Assert(resp.StatusCode, Equals, 501) - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) c.Assert(err, IsNil) c.Assert(string(body), Equals, "search not implemented") @@ -471,7 +471,7 @@ c.Assert(resp.StatusCode, Equals, 200) c.Check(resp.Header.Get("Content-Type"), Equals, "application/x.ubuntu.assertion") - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) c.Assert(err, IsNil) c.Check(string(body), Equals, string(asserts.Encode(systestkeys.TestRootAccount))) } @@ -490,7 +490,7 @@ defer resp.Body.Close() c.Assert(resp.StatusCode, Equals, 200) - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) c.Assert(err, IsNil) c.Check(string(body), Equals, exampleSnapRev) } @@ -504,7 +504,7 @@ defer resp.Body.Close() c.Check(resp.StatusCode, Equals, 200) - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) c.Assert(err, IsNil) c.Check(string(body), Equals, exampleValidationSet) } @@ -518,7 +518,7 @@ defer resp.Body.Close() c.Check(resp.StatusCode, Equals, 200) - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) c.Assert(err, IsNil) c.Check(string(body), Equals, exampleValidationSet) } @@ -532,7 +532,7 @@ defer resp.Body.Close() c.Assert(resp.StatusCode, Equals, 400) - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) c.Assert(err, IsNil) c.Check(string(body), Equals, "cannot retrieve assertion [16 canonical base-set]: the requested sequence must be above 0\n") } @@ -543,7 +543,7 @@ defer resp.Body.Close() c.Assert(resp.StatusCode, Equals, 400) - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) c.Assert(err, IsNil) c.Check(string(body), Equals, "cannot retrieve assertion [16 canonical base-set]: cannot parse sequence foo: strconv.Atoi: parsing \"foo\": invalid syntax\n") } diff -Nru snapd-2.62+23.10/tests/lib/fde-setup-hook/fde-setup.go snapd-2.63+23.10/tests/lib/fde-setup-hook/fde-setup.go --- snapd-2.62+23.10/tests/lib/fde-setup-hook/fde-setup.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/fde-setup-hook/fde-setup.go 2024-04-24 00:00:39.000000000 +0000 @@ -6,7 +6,6 @@ "encoding/json" "fmt" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -69,7 +68,7 @@ if fromInitrd { var err error - input, err = ioutil.ReadAll(os.Stdin) + input, err = io.ReadAll(os.Stdin) if err != nil { return err } @@ -157,7 +156,7 @@ var js fdeRevealJSON var jsStrict fdeRevealJSONStrict - b, err := ioutil.ReadAll(osStdin) + b, err := io.ReadAll(osStdin) if err != nil { return err } diff -Nru snapd-2.62+23.10/tests/lib/fde-setup-hook-v1/fde-setup.go snapd-2.63+23.10/tests/lib/fde-setup-hook-v1/fde-setup.go --- snapd-2.62+23.10/tests/lib/fde-setup-hook-v1/fde-setup.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/fde-setup-hook-v1/fde-setup.go 2024-04-24 00:00:39.000000000 +0000 @@ -5,7 +5,7 @@ "encoding/base64" "encoding/json" "fmt" - "io/ioutil" + "io" "os" "os/exec" "path/filepath" @@ -109,7 +109,7 @@ var js fdeRevealJSON var jsStrict fdeRevealJSONStrict - b, err := ioutil.ReadAll(os.Stdin) + b, err := io.ReadAll(os.Stdin) if err != nil { return err } diff -Nru snapd-2.62+23.10/tests/lib/muinstaller/main.go snapd-2.63+23.10/tests/lib/muinstaller/main.go --- 
snapd-2.62+23.10/tests/lib/muinstaller/main.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/muinstaller/main.go 2024-04-24 00:00:39.000000000 +0000 @@ -25,7 +25,6 @@ "bytes" "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -75,7 +74,7 @@ } devicesLoop: for _, removableAttr := range removable { - val, err := ioutil.ReadFile(removableAttr) + val, err := os.ReadFile(removableAttr) if err != nil || string(val) != "0\n" { // removable, ignore continue diff -Nru snapd-2.62+23.10/tests/lib/nested.sh snapd-2.63+23.10/tests/lib/nested.sh --- snapd-2.62+23.10/tests/lib/nested.sh 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/nested.sh 2024-04-24 00:00:39.000000000 +0000 @@ -1647,3 +1647,25 @@ sleep "$wait" done } + +nested_check_spread_results() { + SPREAD_LOG=$1 + if [ -z "$SPREAD_LOG" ]; then + return 1 + fi + + if grep -eq "Successful tasks:" "$SPREAD_LOG"; then + if grep -E "Failed (task|suite|project)" "$SPREAD_LOG"; then + return 1 + fi + if ! grep -eq "Aborted tasks: 0" "$SPREAD_LOG"; then + return 1 + fi + + if [ "$EXIT_STATUS" = "0" ]; then + return 0 + fi + else + return 1 + fi +} diff -Nru snapd-2.62+23.10/tests/lib/pkgdb.sh snapd-2.63+23.10/tests/lib/pkgdb.sh --- snapd-2.62+23.10/tests/lib/pkgdb.sh 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/pkgdb.sh 2024-04-24 00:00:39.000000000 +0000 @@ -376,7 +376,11 @@ cp /etc/apt/sources.list sources.list.back echo "deb http://archive.ubuntu.com/ubuntu/ $(lsb_release -c -s)-proposed restricted main multiverse universe" | tee /etc/apt/sources.list -a apt update - apt install -y --only-upgrade snapd + if os.query is-ubuntu-ge 23.10; then + apt install -y --only-upgrade -t "$(lsb_release -c -s)-proposed" snapd + else + apt install -y --only-upgrade snapd + fi mv sources.list.back /etc/apt/sources.list apt update @@ -398,11 +402,12 @@ add-apt-repository -y "$PPA_VALIDATION_NAME" apt update apt install -y --only-upgrade snapd - add-apt-repository --remove "$PPA_VALIDATION_NAME" - apt update # Double check that it really comes from the PPA apt show snapd | MATCH "APT-Sources: http.*ppa\.launchpad(content)?\.net" + + add-apt-repository --remove "$PPA_VALIDATION_NAME" + apt update else packages= case "$SPREAD_SYSTEM" in diff -Nru snapd-2.62+23.10/tests/lib/prepare-restore.sh snapd-2.63+23.10/tests/lib/prepare-restore.sh --- snapd-2.62+23.10/tests/lib/prepare-restore.sh 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/prepare-restore.sh 2024-04-24 00:00:39.000000000 +0000 @@ -629,7 +629,9 @@ prepare_suite() { # shellcheck source=tests/lib/prepare.sh . "$TESTSLIB"/prepare.sh - if os.query is-core; then + # os.query cannot be used because first time the suite is prepared, the current system + # is classic ubuntu, so it is needed to check the system set in $SPREAD_SYSTEM + if is_test_target_core; then prepare_ubuntu_core else prepare_classic @@ -664,7 +666,7 @@ tests.backup prepare # save the job which is going to be executed in the system - echo -n "$SPREAD_JOB " >> "$RUNTIME_STATE_PATH/runs" + echo -n "${SPREAD_JOB:-} " >> "$RUNTIME_STATE_PATH/runs" # Restart journal log and reset systemd journal cursor. 
systemctl reset-failed systemd-journald.service diff -Nru snapd-2.62+23.10/tests/lib/prepare.sh snapd-2.63+23.10/tests/lib/prepare.sh --- snapd-2.62+23.10/tests/lib/prepare.sh 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/prepare.sh 2024-04-24 00:00:39.000000000 +0000 @@ -44,6 +44,37 @@ systemctl daemon-reload } +# Set of helpers for checking if the test system is expected to be +# Ubuntu Core. The helpers can be used both in a UC system or in +# a classic system which will be transformed into UC. Note, the +# helpers assume a specific formatting of SPREAD_SYSTEM environment +# variable which follows this pattern: ubuntu-core-[-ARCH]- +# where arch is "" for amd64, arm for armhf and arm64, etc +is_test_target_core() { + local VERSION=${1:-} + [[ "$SPREAD_SYSTEM" = ubuntu-core-${VERSION}* ]] +} + +is_test_target_core_ge() { + local VERSION=${1:-} + if [ -z "$VERSION" ]; then + echo "version id is expected" + exit 1 + fi + CURR_VERSION="$(cut -d- -f3 <<< "$SPREAD_SYSTEM")" + [ "$CURR_VERSION" -ge "${VERSION}" ] +} + +is_test_target_core_le() { + local VERSION=${1:-} + if [ -z "$VERSION" ]; then + echo "version id is expected" + exit 1 + fi + CURR_VERSION="$(cut -d- -f3 <<< "$SPREAD_SYSTEM")" + [ "$CURR_VERSION" -le "${VERSION}" ] +} + ensure_jq() { if command -v jq; then return @@ -794,35 +825,29 @@ unmkinitramfs initrd.img initrd - local output_initrd="${PWD}/initrd.img" - - local unpacked_initrd_root="${PWD}/initrd" - if os.query is-pc-amd64; then - unpacked_initrd_root="${unpacked_initrd_root}/main" + if [ -d ./extra-initrd ]; then + if [ -d ./initrd/early ]; then + cp -aT ./extra-initrd ./initrd/main + else + cp -aT ./extra-initrd ./initrd + fi fi - # copy in snap-bootstrap from the current build - cp /usr/lib/snapd/snap-bootstrap "${unpacked_initrd_root}/usr/lib/snapd/snap-bootstrap" + if [ -d ./initrd/early ]; then + cp -a /usr/lib/snapd/snap-bootstrap ./initrd/main/usr/lib/snapd/snap-bootstrap - # copy in extra files that tests may need for the initrd - if [ -d ./extra-initrd/ ]; then - cp -a ./extra-initrd/* "${unpacked_initrd_root}/" - fi + (cd ./initrd/early; find . | cpio --create --quiet --format=newc --owner=0:0) >initrd.img + (cd ./initrd/main; find . | cpio --create --quiet --format=newc --owner=0:0 | zstd -1 -T0) >>initrd.img + else + cp -a /usr/lib/snapd/snap-bootstrap ./initrd/usr/lib/snapd/snap-bootstrap - cd "${unpacked_initrd_root}" - find . | cpio --create --quiet --format=newc --owner=0:0 | lz4 -l -7 > "${output_initrd}" - cd - + (cd ./initrd; find . | cpio --create --quiet --format=newc --owner=0:0 | zstd -1 -T0) >initrd.img + fi - quiet apt download systemd-boot-efi - quiet apt install -y llvm - dpkg --fsys-tarfile systemd-boot-efi_*.deb | - tar xf - ./usr/lib/systemd/boot/efi/linuxx64.efi.stub + quiet apt install -y systemd-boot-efi systemd-ukify objcopy -O binary -j .linux pc-kernel/kernel.efi linux - llvm-objcopy --add-section .linux=linux --set-section-flags .linux=readonly,data \ - --add-section .initrd=initrd.img --set-section-flags .initrd=readonly,data \ - usr/lib/systemd/boot/efi/linuxx64.efi.stub \ - pc-kernel/kernel.efi + /usr/lib/systemd/ukify build --linux=linux --initrd=initrd.img --output=pc-kernel/kernel.efi #shellcheck source=tests/lib/nested.sh . "$TESTSLIB/nested.sh" @@ -978,19 +1003,19 @@ snap wait system seed.loaded # download the snapd snap for all uc systems except uc16 - if ! os.query is-core16; then + if ! 
is_test_target_core 16; then snap download "--channel=${SNAPD_CHANNEL}" snapd fi # we cannot use "snaps.names tool" here because no snaps are installed yet core_name="core" - if os.query is-core18; then + if is_test_target_core 18; then core_name="core18" - elif os.query is-core20; then + elif is_test_target_core 20; then core_name="core20" - elif os.query is-core22; then + elif is_test_target_core 22; then core_name="core22" - elif os.query is-core24; then + elif is_test_target_core 24; then core_name="core24" # TODO: revert this once snaps are ready in target channel KERNEL_CHANNEL=beta @@ -1010,7 +1035,7 @@ if os.query is-arm; then snap install ubuntu-image --channel="$UBUNTU_IMAGE_SNAP_CHANNEL" --classic - elif os.query is-core16; then + elif is_test_target_core 16; then # the new ubuntu-image expects mkfs to support -d option, which was not # supported yet by the version of mkfs that shipped with Ubuntu 16.04 snap install ubuntu-image --channel="$UBUNTU_IMAGE_SNAP_CHANNEL" --classic @@ -1033,21 +1058,21 @@ cp /usr/bin/snap "$IMAGE_HOME" export UBUNTU_IMAGE_SNAP_CMD="$IMAGE_HOME/snap" - if os.query is-core18; then + if is_test_target_core 18; then repack_snapd_snap_with_deb_content "$IMAGE_HOME" # FIXME: fetch directly once its in the assertion service cp "$TESTSLIB/assertions/ubuntu-core-18-amd64.model" "$IMAGE_HOME/pc.model" - elif os.query is-core20; then + elif is_test_target_core 20; then repack_snapd_snap_with_deb_content_and_run_mode_firstboot_tweaks "$IMAGE_HOME" cp "$TESTSLIB/assertions/ubuntu-core-20-amd64.model" "$IMAGE_HOME/pc.model" - elif os.query is-core22; then + elif is_test_target_core 22; then repack_snapd_snap_with_deb_content_and_run_mode_firstboot_tweaks "$IMAGE_HOME" if os.query is-arm; then cp "$TESTSLIB/assertions/ubuntu-core-22-arm64.model" "$IMAGE_HOME/pc.model" else cp "$TESTSLIB/assertions/ubuntu-core-22-amd64.model" "$IMAGE_HOME/pc.model" fi - elif os.query is-core24; then + elif is_test_target_core 24; then repack_snapd_snap_with_deb_content_and_run_mode_firstboot_tweaks "$IMAGE_HOME" cp "$TESTSLIB/assertions/ubuntu-core-24-amd64.model" "$IMAGE_HOME/pc.model" else @@ -1093,10 +1118,10 @@ IMAGE_CHANNEL="$KERNEL_CHANNEL" else IMAGE_CHANNEL="$GADGET_CHANNEL" - if os.query is-core16 || os.query is-core18; then - if os.query is-core16; then + if is_test_target_core_le 18; then + if is_test_target_core 16; then BRANCH=latest - elif os.query is-core18; then + elif is_test_target_core 18; then BRANCH=18 fi # download pc-kernel snap for the specified channel and set @@ -1115,19 +1140,19 @@ fi fi - if os.query is-core-ge 20; then - if os.query is-core20; then + if is_test_target_core_ge 20; then + if is_test_target_core 20; then BRANCH=20 - elif os.query is-core22; then + elif is_test_target_core 22; then BRANCH=22 - elif os.query is-core24; then + elif is_test_target_core 24; then BRANCH=24 fi snap download --basename=pc-kernel --channel="${BRANCH}/${KERNEL_CHANNEL}" pc-kernel # make sure we have the snap test -e pc-kernel.snap # build the initramfs with our snapd assets into the kernel snap - if os.query is-core-ge 24; then + if is_test_target_core_ge 24; then uc24_build_initramfs_kernel_snap "$PWD/pc-kernel.snap" "$IMAGE_HOME" else uc20_build_initramfs_kernel_snap "$PWD/pc-kernel.snap" "$IMAGE_HOME" @@ -1136,25 +1161,15 @@ # also add debug command line parameters to the kernel command line via # the gadget in case things go side ways and we need to debug - if os.query is-core24; then + if is_test_target_core 24; then # TODO: remove this once pc snap is available 
in beta channel snap download --basename=pc --channel="${BRANCH}/edge" pc - # TODO: remove this once 24/edge channel is fixed - snap download --basename=pc-23 --channel="classic-23.10/edge" pc else snap download --basename=pc --channel="${BRANCH}/${KERNEL_CHANNEL}" pc fi test -e pc.snap unsquashfs -d pc-gadget pc.snap - # TODO: remove this once 24/edge channel is fixed - if os.query is-core24; then - unsquashfs -d pc-gadget-23 pc-23.snap - cp pc-gadget-23/shim.efi.signed pc-gadget/shim.efi.signed - cp pc-gadget-23/grubx64.efi pc-gadget/grubx64.efi - rm -r pc-gadget-23 pc-23.{snap,assert} - fi - # TODO: it would be desirable when we need to do in-depth debugging of # UC20 runs in google to have snapd.debug=1 always on the kernel command # line, but we can't do this universally because the logic for the env @@ -1202,7 +1217,7 @@ # on core18 we need to use the modified snapd snap and on core16 # it is the modified core that contains our freshly build snapd - if os.query is-core-ge 18; then + if is_test_target_core_ge 18; then extra_snap=("$IMAGE_HOME"/snapd_*.snap) else extra_snap=("$IMAGE_HOME"/core_*.snap) @@ -1215,11 +1230,13 @@ fi # download the core20 snap manually from the specified channel for UC20 - if os.query os.query is-core-ge 20; then - if os.query is-core20; then + if is_test_target_core_ge 20; then + if is_test_target_core 20; then BASE=core20 - elif os.query is-core22; then + elif is_test_target_core 22; then BASE=core22 + elif is_test_target_core 24; then + BASE=core24 fi snap download "${BASE}" --channel="$BASE_CHANNEL" --basename="${BASE}" @@ -1249,7 +1266,7 @@ EXTRA_FUNDAMENTAL="$EXTRA_FUNDAMENTAL --snap ${IMAGE_HOME}/${BASE}.snap" fi local UBUNTU_IMAGE="$GOHOME"/bin/ubuntu-image - if os.query is-core16 || os.query is-arm; then + if is_test_target_core 16 || os.query is-arm; then # ubuntu-image on 16.04 needs to be installed from a snap UBUNTU_IMAGE=/snap/bin/ubuntu-image fi @@ -1265,7 +1282,7 @@ if os.query is-arm; then LOOP_PARTITION=1 - elif os.query is-core-ge 20; then + elif is_test_target_core_ge 20; then # (ab)use ubuntu-seed LOOP_PARTITION=2 else @@ -1275,7 +1292,7 @@ # expand the uc16 and uc18 images a little bit (400M) as it currently will # run out of space easily from local spread runs if there are extra files in # the project not included in the git ignore and spread ignore, etc. - if os.query is-core-le 18; then + if is_test_target_core_le 18; then # grow the image by 400M truncate --size=+400M "$IMAGE_HOME/$IMAGE" # fix the GPT table because old versions of parted complain about this @@ -1311,7 +1328,7 @@ # - built debs # - golang archive files and built packages dir # - govendor .cache directory and the binary, - if os.query is-core-le 18; then + if is_test_target_core_le 18; then mkdir -p /mnt/user-data/ # we need to include "core" here because -C option says to ignore # files the way CVS(?!) 
does, so it ignores files named "core" which @@ -1364,7 +1381,7 @@ fi # now modify the image writable partition - only possible on uc16 / uc18 - if os.query is-core-le 18; then + if is_test_target_core_le 18; then # modify the writable partition of "core" so that we have the # test user setup_core_for_testing_by_modify_writable "$UNPACK_DIR" @@ -1375,7 +1392,7 @@ kpartx -d "$IMAGE_HOME/$IMAGE" gzip "${IMAGE_HOME}/${IMAGE}" - if os.query is-core16; then + if is_test_target_core 16; then "${TESTSLIB}/uc16-reflash.sh" "${IMAGE_HOME}/${IMAGE}.gz" else "${TESTSLIB}/reflash.sh" "${IMAGE_HOME}/${IMAGE}.gz" diff -Nru snapd-2.62+23.10/tests/lib/reset.sh snapd-2.63+23.10/tests/lib/reset.sh --- snapd-2.62+23.10/tests/lib/reset.sh 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/reset.sh 2024-04-24 00:00:39.000000000 +0000 @@ -35,6 +35,23 @@ ;; esac + local unexpected_units=0 + for unit in $(systemctl --plain --no-legend --full | awk '/^ *snap\..*\.service +loaded/ {print $1}'); do + case "$unit" in + snap.lxd.workaround.service) + systemctl stop "$unit" || true + ;; + *) + echo "unexpected unit $unit" + unexpected_units=1 + ;; + esac + done + if [ "$unexpected_units" != "0" ]; then + echo "error: found unexpected systemd units after purge" + exit 1 + fi + # purge may have removed udev rules, retrigger device events udevadm trigger udevadm settle diff -Nru snapd-2.62+23.10/tests/lib/tools/suite/user-state/task.yaml snapd-2.63+23.10/tests/lib/tools/suite/user-state/task.yaml --- snapd-2.62+23.10/tests/lib/tools/suite/user-state/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/tools/suite/user-state/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,9 @@ summary: smoke test for the user-state tool +details: | + Check the user-state test tools works properly. Verify it can + remove/list users and groups. + execute: | # Check help "$TESTSTOOLS"/user-state | MATCH "usage: user-state remove-with-group " @@ -14,7 +18,11 @@ # Check remove user and group if [ -f /var/lib/extrausers/passwd ]; then - adduser --extrausers --quiet --disabled-password --gecos '' mytest001 + if os.query is-core-ge 24; then + useradd --extrausers mytest001 + else + adduser --extrausers --quiet --disabled-password --gecos '' mytest001 + fi else groupadd mytest001 useradd mytest001 -g mytest001 diff -Nru snapd-2.62+23.10/tests/lib/tools/tests.invariant snapd-2.63+23.10/tests/lib/tools/tests.invariant --- snapd-2.62+23.10/tests/lib/tools/tests.invariant 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/lib/tools/tests.invariant 2024-04-24 00:00:39.000000000 +0000 @@ -199,6 +199,14 @@ fi } +check_fakestore_cleaned() { + # Check if fakestore was properly cleaned to avoid leaking into other tests. 
+ if [ -f "/etc/systemd/system/snapd.service.d/store.conf" ]; then + echo "/etc/systemd/system/snapd.service.d/store.conf was not cleaned properly" + exit 1 + fi +} + check_invariant() { case "$1" in root-files-in-home) @@ -225,6 +233,9 @@ segmentation-violations) check_segmentation_violations "$1" ;; + check-fakestore-cleaned) + check_fakestore_cleaned + ;; *) echo "tests.invariant: unknown invariant $1" >&2 exit 1 @@ -233,7 +244,7 @@ } main() { - ALL_INVARIANTS="root-files-in-home crashed-snap-confine lxcfs-mounted stray-dbus-daemon leftover-defer-sh broken-snaps cgroup-scopes segmentation-violations" + ALL_INVARIANTS="root-files-in-home crashed-snap-confine lxcfs-mounted stray-dbus-daemon leftover-defer-sh broken-snaps cgroup-scopes segmentation-violations check-fakestore-cleaned" case "$action" in check) diff -Nru snapd-2.62+23.10/tests/main/cgroup-devices-v1/task.yaml snapd-2.63+23.10/tests/main/cgroup-devices-v1/task.yaml --- snapd-2.62+23.10/tests/main/cgroup-devices-v1/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/cgroup-devices-v1/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -9,6 +9,17 @@ is executed via snap-confine. # Disable the test on all systems that boot with cgroup v2 -systems: [ -fedora-*, -debian-*, -arch-*, -opensuse-tumbleweed-*, -ubuntu-22.*, -ubuntu-23.*, -ubuntu-24.*, -ubuntu-core-22-*, -centos-9-*, -amazon-linux-2023-*] +systems: + - -fedora-* + - -debian-* + - -arch-* + - -opensuse-tumbleweed-* + - -centos-9-* + - -amazon-linux-2023-* + - -ubuntu-22.* + - -ubuntu-23.* + - -ubuntu-24.* + - -ubuntu-core-22-* + - -ubuntu-core-24-* execute: ./task.sh diff -Nru snapd-2.62+23.10/tests/main/cgroup-freezer/task.yaml snapd-2.63+23.10/tests/main/cgroup-freezer/task.yaml --- snapd-2.62+23.10/tests/main/cgroup-freezer/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/cgroup-freezer/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -5,7 +5,18 @@ placed into the appropriate hierarchy under the freezer cgroup. # Disable the test on all systems that boot with cgroup v2 -systems: [ -fedora-*, -debian-*, -arch-*, -opensuse-tumbleweed-*, -ubuntu-22.*, -ubuntu-23.*, -ubuntu-24.*, -ubuntu-core-22-*, -centos-9-*, -amazon-linux-2023-*] +systems: + - -fedora-* + - -debian-* + - -arch-* + - -opensuse-tumbleweed-* + - -centos-9-* + - -amazon-linux-2023-* + - -ubuntu-22.* + - -ubuntu-23.* + - -ubuntu-24.* + - -ubuntu-core-22-* + - -ubuntu-core-24-* prepare: | "$TESTSTOOLS"/snaps-state install-local test-snapd-sh diff -Nru snapd-2.62+23.10/tests/main/component/task.yaml snapd-2.63+23.10/tests/main/component/task.yaml --- snapd-2.62+23.10/tests/main/component/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/component/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -3,7 +3,7 @@ details: | Verifies that basic snap component operations (install, refresh, remove) work. 
-systems: [ubuntu-16.04-64, ubuntu-18.04-64, ubuntu-2*, ubuntu-core-*] +systems: [ubuntu-16.04-64, ubuntu-18.04-64, ubuntu-2*, ubuntu-core-*, fedora-*] execute: | # Build snap and component @@ -15,23 +15,60 @@ exit 1 fi - # Install snap and component + # Install snap snap install --dangerous snap-with-comps_1.0_all.snap - chg_id=$(snap install --no-wait --dangerous snap-with-comps+comp1_1.0.comp) - snap watch "$chg_id" - # Chech component install change was as expected - snap change "$chg_id" | MATCH "^Done .*Prepare component" - snap change "$chg_id" | MATCH "^Done .*Mount component" - snap change "$chg_id" | MATCH "^Done .*Make component .* available to the system" - - # File has been copied around - comp_inst_path=/var/lib/snapd/snaps/snap-with-comps+comp1_x1.comp - stat "$comp_inst_path" - - # Component is mounted (note that we need to escape the "+" in the path) - mount | MATCH "^${comp_inst_path/+/\\+} on /snap/snap-with-comps/components/x1/comp1.*" - # and is seen from snap app - snap-with-comps.test + SNAP_MOUNT_DIR="$(os.paths snap-mount-dir)" - # TODO: refresh and remove checks when implemented by snapd + # Install local component function + # $1: expected component revision + install_comp() { + # x1 is the snap revision + # Find out previous comp rev + symlink=$SNAP_MOUNT_DIR/snap-with-comps/components/x1/comp1 + prev_comp_rev=$(basename "$(readlink "$symlink")") + + comp_rev=$1 + chg_id=$(snap install --no-wait --dangerous snap-with-comps+comp1_1.0.comp) + snap watch "$chg_id" + + # Check component install change was as expected + snap change "$chg_id" | MATCH "^Done .*Prepare component" + snap change "$chg_id" | MATCH "^Done .*Mount component" + snap change "$chg_id" | MATCH "^Done .*Make component .* available to the system" + + # File has been copied around + comp_inst_path=/var/lib/snapd/snaps/snap-with-comps+comp1_${comp_rev}.comp + stat "$comp_inst_path" + + # Component is mounted (note that we need to escape the "+" in the path) + mnt_point=$SNAP_MOUNT_DIR/snap-with-comps/components/mnt/comp1/${comp_rev} + mount | MATCH "^${comp_inst_path/+/\\+} on ${mnt_point} .*" + # And symlinked + readlink "$symlink" | MATCH "\\.\\./mnt/comp1/$comp_rev" + readlink -f "$symlink" | MATCH "$mnt_point" + # and is seen from snap app + snap-with-comps.test + + # Old component is not mounted and has been removed + if [ -n "$prev_comp_rev" ]; then + prev_mnt_point=$SNAP_MOUNT_DIR/snap-with-comps/components/mnt/comp1/${prev_comp_rev} + prev_comp_inst_path=/var/lib/snapd/snaps/snap-with-comps+comp1_${prev_comp_rev}.comp + mount | not MATCH "^${prev_comp_inst_path/+/\\+} on ${prev_mnt_point} .*" + not stat "$prev_comp_inst_path" + not stat "$prev_mnt_point" + fi + } + + # Install, then reinstall local component + install_comp x1 + install_comp x2 + + # TODO: add checks for components removals when implemented by snapd + # For the moment, remove the snap and then manually the components + snap remove snap-with-comps + cd /etc/systemd/system/ + systemctl stop -- *'snap\x2dwith\x2dcomps-components-mnt-comp1-x2.mount' + cd - + rm /etc/systemd/system/*'-snap\x2dwith\x2dcomps-components-mnt-comp1-x2.mount' + rm -rf "$SNAP_MOUNT_DIR"/snap-with-comps/ diff -Nru snapd-2.62+23.10/tests/main/fake-netplan-apply/task.yaml snapd-2.63+23.10/tests/main/fake-netplan-apply/task.yaml --- snapd-2.62+23.10/tests/main/fake-netplan-apply/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/fake-netplan-apply/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -38,11 +38,19 @@ sed 
"$TESTSLIB/snaps/netplan-snap/meta/snap.yaml.in" -e "s/base: BASESNAP/base: core20/" > "$TESTSLIB/snaps/netplan-snap/meta/snap.yaml" snap pack "$TESTSLIB/snaps/netplan-snap" --filename=netplan-snap-20.snap snap install --dangerous netplan-snap-20.snap - elif os.query is-ubuntu-ge 22.04; then + elif os.query is-jammy || os.query is-ubuntu 23.10; then # use base: core22 sed "$TESTSLIB/snaps/netplan-snap/meta/snap.yaml.in" -e "s/base: BASESNAP/base: core22/" > "$TESTSLIB/snaps/netplan-snap/meta/snap.yaml" snap pack "$TESTSLIB/snaps/netplan-snap" --filename=netplan-snap-22.snap snap install --dangerous netplan-snap-22.snap + elif os.query is-noble; then + # use base: core24 + sed "$TESTSLIB/snaps/netplan-snap/meta/snap.yaml.in" -e "s/base: BASESNAP/base: core24/" > "$TESTSLIB/snaps/netplan-snap/meta/snap.yaml" + snap pack "$TESTSLIB/snaps/netplan-snap" --filename=netplan-snap-24.snap + snap install --edge core24 + tests.cleanup defer snap remove --purge core24 + snap install --dangerous netplan-snap-24.snap + tests.cleanup defer snap remove --purge netplan-snap else echo "new core release, please update test for new ubuntu core version" exit 1 @@ -89,6 +97,8 @@ fi done + tests.cleanup restore + execute: | echo "The network-setup-control interface is disconnected by default" snap connections netplan-snap | MATCH 'network-setup-control +netplan-snap:network-setup-control +- +-' diff -Nru snapd-2.62+23.10/tests/main/high-user-handling/task.yaml snapd-2.63+23.10/tests/main/high-user-handling/task.yaml --- snapd-2.62+23.10/tests/main/high-user-handling/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/high-user-handling/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,4 +1,6 @@ -summary: Check that the refresh data copy works. +summary: Check handling of exceptionally high user IDs +details: | + Check that osutil handling of exceptionally high user IDs is correct. systems: - -ubuntu-14.04-* # no support for tests.session @@ -11,6 +13,7 @@ restore: | tests.session -u hightest restore + loginctl kill-user hightest || true userdel -r hightest execute: | diff -Nru snapd-2.62+23.10/tests/main/install-sideload/task.yaml snapd-2.63+23.10/tests/main/install-sideload/task.yaml --- snapd-2.62+23.10/tests/main/install-sideload/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/install-sideload/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,9 @@ summary: Checks for snap sideload install +details: | + Check snaps installation with --dangerous, --jailmode, --devmode, and + deprecated options. Also validate snaps can be removed with --revision option. + # slow in autopkgtest (>1m) backends: [-autopkgtest] @@ -68,7 +72,7 @@ # TODO:UC20: fix to work on uc20 too # The "seed/" dir is on a FAT partition on uc20 so the permissions are # different here. - if ! os.query is-core20 && ! 
os.query is-core22; then + if os.query is-core-le 18; then echo "All snap blobs are 0600" test "$( find /var/lib/snapd/{snaps,cache,seed/snaps}/ -type f -printf '%#m\n' | sort -u | xargs )" = "0600" fi diff -Nru snapd-2.62+23.10/tests/main/interfaces-system-observe/task.yaml snapd-2.63+23.10/tests/main/interfaces-system-observe/task.yaml --- snapd-2.62+23.10/tests/main/interfaces-system-observe/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/interfaces-system-observe/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,10 @@ systemctl start systemd-hostnamed fi + # TODO: we should use only one snap for testing "$TESTSTOOLS"/snaps-state install-local testsnap + snap connect testsnap:system-observe + snap connect testsnap:network-setup-observe restore: | if not os.query is-trusty; then @@ -36,7 +39,6 @@ && ! os.query is-arch-linux \ && ! os.query is-opensuse tumbleweed; then echo "Check that we can read /boot" - snap connect testsnap:system-observe KERNEL_VERSION="$(uname -r)" testsnap.cmd cat "/boot/config-$KERNEL_VERSION" | MATCH "CONFIG_" @@ -63,6 +65,21 @@ su -l -c "test-snapd-system-observe-consumer.dbus-introspect" test | MATCH "$expected" fi + echo "Snap is able is able to query systemd properties" + testsnap.cmd busctl call org.freedesktop.systemd1 /org/freedesktop/systemd1 org.freedesktop.DBus.Properties \ + GetAll s org.freedesktop.systemd1.Manager + + # systemd in 14.04 does not implement org.freedesktop.systemd1.Unit for units + if not os.query is-trusty; then + echo "Snap is able to list units" + testsnap.cmd busctl call org.freedesktop.systemd1 /org/freedesktop/systemd1 org.freedesktop.systemd1.Manager ListUnits + + echo "Snap is able to query unit properties" + testsnap.cmd busctl call org.freedesktop.systemd1 /org/freedesktop/systemd1/unit/snapd_2eservice \ + org.freedesktop.DBus.Properties GetAll s org.freedesktop.systemd1.Unit + fi + + if [ "$(snap debug confinement)" = partial ] ; then exit 0 fi @@ -72,6 +89,7 @@ echo "When the plug is disconnected" snap disconnect test-snapd-system-observe-consumer:system-observe + snap disconnect testsnap:system-observe echo "Then the snap is not able to get system information" if su -l -c "test-snapd-system-observe-consumer.consumer" test 2> consumer.error; then @@ -89,3 +107,20 @@ MATCH "Permission denied" < introspect.error fi + echo "Snap is not permitted to query systemd properties" + not testsnap.cmd busctl call org.freedesktop.systemd1 /org/freedesktop/systemd1 org.freedesktop.DBus.Properties \ + GetAll s org.freedesktop.systemd1.Manager 2> log.error + MATCH 'Access denied' < log.error + + if not os.query is-trusty; then + echo "Snap is not able to list units" + not testsnap.cmd busctl call org.freedesktop.systemd1 /org/freedesktop/systemd1 org.freedesktop.systemd1.Manager \ + ListUnits 2> log.error + MATCH 'Access denied' < log.error + + echo "Snap is not permitted to query unit properties" + not testsnap.cmd busctl call org.freedesktop.systemd1 /org/freedesktop/systemd1/unit/snapd_2eservice \ + org.freedesktop.DBus.Properties GetAll s org.freedesktop.systemd1.Unit 2> log.error + MATCH 'Access denied' < log.error + fi + diff -Nru snapd-2.62+23.10/tests/main/interfaces-system-observe/testsnap/meta/snap.yaml snapd-2.63+23.10/tests/main/interfaces-system-observe/testsnap/meta/snap.yaml --- snapd-2.62+23.10/tests/main/interfaces-system-observe/testsnap/meta/snap.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/interfaces-system-observe/testsnap/meta/snap.yaml 2024-04-24 
00:00:39.000000000 +0000 @@ -4,4 +4,8 @@ apps: cmd: command: bin/cmd - plugs: [ system-observe ] + plugs: + # for the test + - system-observe + # so that we can run busctl + - network-setup-observe diff -Nru snapd-2.62+23.10/tests/main/listing/task.yaml snapd-2.63+23.10/tests/main/listing/task.yaml --- snapd-2.62+23.10/tests/main/listing/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/listing/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,8 @@ summary: Check snap listings +details: | + Check the output of command `snap list` is the expected based on the current systems + prepare: | "$TESTSTOOLS"/snaps-state install-local test-snapd-sh @@ -32,12 +35,12 @@ NOTES=core #shellcheck disable=SC2166 - if [ "$SPREAD_BACKEND" = "google" -o "$SPREAD_BACKEND" == "qemu" ] && [ "$SPREAD_SYSTEM" = "ubuntu-core-16-64" ]; then + if [ "$SPREAD_BACKEND" = "google" -o "$SPREAD_BACKEND" == "qemu" ] && os.query is-core16; then echo "With customized images the core snap is sideloaded" REV=$SIDELOAD_REV PUBLISHER=- - elif [ "$SPREAD_BACKEND" = "google" -o "$SPREAD_BACKEND" = "google-arm" -o "$SPREAD_BACKEND" == "qemu" ] && [ "$SPREAD_SYSTEM" = "ubuntu-core-18-64" -o "$SPREAD_SYSTEM" = "ubuntu-core-20-64" -o "$SPREAD_SYSTEM" = "ubuntu-core-22-64" -o "$SPREAD_SYSTEM" = "ubuntu-core-22-arm-64" ]; then + elif [ "$SPREAD_BACKEND" = "google" -o "$SPREAD_BACKEND" = "google-arm" -o "$SPREAD_BACKEND" == "qemu" ] && os.query is-core-ge 18; then echo "With customized images the snapd snap is sideloaded" NAME=snapd VERSION=$SNAPD_GIT_VERSION @@ -54,7 +57,7 @@ echo "On the external device the core snap tested could be in any track" TRACKING="(latest/)?(edge|beta|candidate|stable)" - elif [ "$SPREAD_BACKEND" = "external" ] && { os.query is-core18 || os.query is-core20 || os.query is-core22; }; then + elif [ "$SPREAD_BACKEND" = "external" ] && os.query is-core-ge 18; then echo "On the external device the snapd snap tested could be in any track" NAME=snapd VERSION=$SNAPD_GIT_VERSION diff -Nru snapd-2.62+23.10/tests/main/lxd-no-fuse/task.yaml snapd-2.63+23.10/tests/main/lxd-no-fuse/task.yaml --- snapd-2.62+23.10/tests/main/lxd-no-fuse/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/lxd-no-fuse/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,4 +1,8 @@ -summary: Check that we can install snaps when fuse is missing in lxd +summary: Check that we can install snaps when fuse/fuse3 is missing in lxd + +details: | + Verify that fuse/fuse3 is pulled in as a dependency when installing snapd + from deb. 
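As a quick manual counterpart to what this test automates, the dependency can be verified in a throwaway container; the container name and image below are illustrative assumptions, while the test itself drives lxd.lxc against its own my-ubuntu container and the locally built snapd deb:

    # Removing fuse/fuse3 and then installing snapd from the archive should
    # pull one of the fuse packages back in as a dependency.
    lxc launch ubuntu:22.04 fuse-check
    lxc exec fuse-check -- apt-get update
    # this may also remove snapd if it is already installed, which is fine here
    lxc exec fuse-check -- apt-get remove -y fuse fuse3
    lxc exec fuse-check -- apt-get install -y snapd
    lxc exec fuse-check -- sh -c 'dpkg -l fuse fuse3 2>/dev/null | grep "^ii"'
    lxc delete -f fuse-check
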
# we just need a single system to verify this systems: [ubuntu-22.04-64] @@ -24,8 +28,8 @@ exit 1 fi - echo "Remove fuse to trigger the fuse precondition check" - lxd.lxc exec my-ubuntu -- apt autoremove -y fuse + echo "Remove fuse/fuse3 to trigger the fuse precondition check" + lxd.lxc exec my-ubuntu -- apt autoremove -y fuse fuse3 echo "Install snapd" lxd.lxc exec my-ubuntu -- mkdir -p "$GOHOME" diff -Nru snapd-2.62+23.10/tests/main/preseed/task.yaml snapd-2.63+23.10/tests/main/preseed/task.yaml --- snapd-2.62+23.10/tests/main/preseed/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/preseed/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -123,7 +123,7 @@ # the list of expected profiles isn't exhaustive, we're just checking some critical ones for prof in snap.lxd.lxd snap.lxd.hook.install snap.lxd.hook.configure snap.lxd.daemon; do test -f "$AA_PROFILES/$prof" - test -f "$SECCOMP_PROFILES/$prof.bin" + test -f "$SECCOMP_PROFILES/$prof.bin2" done echo "Checking that mount units have been created and enabled on the target image" diff -Nru snapd-2.62+23.10/tests/main/preseed-core20/task.yaml snapd-2.63+23.10/tests/main/preseed-core20/task.yaml --- snapd-2.62+23.10/tests/main/preseed-core20/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/preseed-core20/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -167,7 +167,7 @@ MATCH "^var/lib/snapd/cgroup/snap.pc-kernel.device" < files.log MATCH "^var/lib/snapd/seccomp/bpf/snap.pc.hook.configure.src" < files.log - MATCH "^var/lib/snapd/seccomp/bpf/snap.pc.hook.configure.bin" < files.log + MATCH "^var/lib/snapd/seccomp/bpf/snap.pc.hook.configure.bin2" < files.log MATCH "^var/lib/snapd/sequence/pc-kernel.json" < files.log MATCH "^var/lib/snapd/sequence/pc.json" < files.log diff -Nru snapd-2.62+23.10/tests/main/preseed-lxd/task.yaml snapd-2.63+23.10/tests/main/preseed-lxd/task.yaml --- snapd-2.62+23.10/tests/main/preseed-lxd/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/preseed-lxd/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -60,7 +60,7 @@ apt autoremove --purge -y fi - umount "$IMAGE_MOUNTPOINT" + umount "$IMAGE_MOUNTPOINT" rmdir "$IMAGE_MOUNTPOINT" # qemu-nbd -d may sporadically fail when removing the device, @@ -69,6 +69,11 @@ "$TESTSTOOLS"/lxd-state undo-mount-changes + # the test started a privileged LXD container which most likely replaced the + # AppArmor profile for /usr/lib/snapd/snap-confine with its own version, + # restart apparmor.service so we get back the right profiles + systemctl restart apparmor.service + execute: | echo "Create a trivial container using the lxd snap" lxd waitready diff -Nru snapd-2.62+23.10/tests/main/security-device-cgroups/task.yaml snapd-2.63+23.10/tests/main/security-device-cgroups/task.yaml --- snapd-2.62+23.10/tests/main/security-device-cgroups/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/security-device-cgroups/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,8 +1,14 @@ summary: Ensure that the security rules related to device cgroups work. 
+details: | + This test checks the when an udev rule assigning a device to a snap is added, + then just that device is assigned to that snap and other devices are not shown + as assigned to the snap + systems: - #TODO: bpftool is not available on core22 and tests.device-cgroup needs it for cgroups v2 + #TODO: bpftool is not available on core22+ and tests.device-cgroup needs it for cgroups v2 - -ubuntu-core-22-* + - -ubuntu-core-24-* environment: DEVICE_NAME/kmsg: kmsg diff -Nru snapd-2.62+23.10/tests/main/security-device-cgroups-helper/task.yaml snapd-2.63+23.10/tests/main/security-device-cgroups-helper/task.yaml --- snapd-2.62+23.10/tests/main/security-device-cgroups-helper/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/security-device-cgroups-helper/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -4,8 +4,9 @@ The test verifies that snap-device-helper correctly modifies the cgroups systems: - #TODO: bpftool is not available on core22 and tests.device-cgroup needs it for cgroups v2 + #TODO: bpftool is not available on core22+ and tests.device-cgroup needs it for cgroups v2 - -ubuntu-core-22-* + - -ubuntu-core-24-* environment: # note that /dev/full has well known major:minor which is 1:7 diff -Nru snapd-2.62+23.10/tests/main/security-device-cgroups-required-or-optional/task.yaml snapd-2.63+23.10/tests/main/security-device-cgroups-required-or-optional/task.yaml --- snapd-2.62+23.10/tests/main/security-device-cgroups-required-or-optional/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/security-device-cgroups-required-or-optional/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -6,20 +6,26 @@ bases). systems: - #TODO: bpftool is not available on core22 and tests.device-cgroup needs it for cgroups v2 + #TODO: bpftool is not available on core22+ and tests.device-cgroup needs it for cgroups v2 - -ubuntu-core-22-* + - -ubuntu-core-24-* # no core20 i386 - -ubuntu-18.04-32 execute: | + #shellcheck source=tests/lib/systems.sh + . "$TESTSLIB"/systems.sh + echo "Given snap is installed" "$TESTSTOOLS"/snaps-state install-local test-snapd-sh-core20 test -f /var/lib/snapd/cgroup/snap.test-snapd-sh-core20.device + NOMATCH "non-strict=true" < /var/lib/snapd/cgroup/snap.test-snapd-sh-core20.device # XXX explicitly install core24 until there is no release into the stable channel snap install --edge core24 "$TESTSTOOLS"/snaps-state install-local test-snapd-sh-core24 - test -f /var/lib/snapd/cgroup/snap.test-snapd-sh-core20.device + test -f /var/lib/snapd/cgroup/snap.test-snapd-sh-core24.device + NOMATCH "non-strict=true" < /var/lib/snapd/cgroup/snap.test-snapd-sh-core24.device echo "No devices are assigned to either snap" udevadm info "/dev/null" | NOMATCH "E: TAGS=.*snap_test-snapd-sh.*" @@ -42,3 +48,30 @@ echo "Device is listed as allowed" tests.device-cgroup test-snapd-sh-core24.sh dump | MATCH "c 1:3" + + # drop persistent cgroup information + if is_cgroupv2; then + rm /sys/fs/bpf/snap/snap_test-snapd-sh-core24_sh + test ! -e /sys/fs/bpf/snap/snap_test-snapd-sh-core20_sh + else + rmdir /sys/fs/cgroup/devices/snap.test-snapd-sh-core24.sh + test ! 
-e /sys/fs/cgroup/devices/snap.test-snapd-sh-core20.sh + fi + + echo "When snaps are installed in devmode" + "$TESTSTOOLS"/snaps-state install-local test-snapd-sh-core20 --devmode + MATCH "non-strict=true" < /var/lib/snapd/cgroup/snap.test-snapd-sh-core20.device + + "$TESTSTOOLS"/snaps-state install-local test-snapd-sh-core24 --devmode + MATCH "non-strict=true" < /var/lib/snapd/cgroup/snap.test-snapd-sh-core24.device + + test-snapd-sh-core20.sh -c 'true' + test-snapd-sh-core24.sh -c 'true' + + if is_cgroupv2; then + test ! -e /sys/fs/bpf/snap/snap_test-snapd-sh-core24_sh + test ! -e /sys/fs/bpf/snap/snap_test-snapd-sh-core20_sh + else + test ! -e /sys/fs/cgroup/devices/snap.test-snapd-sh-core24.sh + test ! -e /sys/fs/cgroup/devices/snap.test-snapd-sh-core20.sh + fi diff -Nru snapd-2.62+23.10/tests/main/security-device-cgroups-self-manage/task.yaml snapd-2.63+23.10/tests/main/security-device-cgroups-self-manage/task.yaml --- snapd-2.62+23.10/tests/main/security-device-cgroups-self-manage/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/security-device-cgroups-self-manage/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -5,8 +5,9 @@ the device cgroup. systems: - # bpftool is not available on core22 and tests.device-cgroup needs it for cgroups v2 + # bpftool is not available on core22+ and tests.device-cgroup needs it for cgroups v2 - -ubuntu-core-22-* + - -ubuntu-core-24-* # no i386 build of core24 - -ubuntu-18.04-32 # because udev is a mess there diff -Nru snapd-2.62+23.10/tests/main/security-device-cgroups-serial-port/task.yaml snapd-2.63+23.10/tests/main/security-device-cgroups-serial-port/task.yaml --- snapd-2.62+23.10/tests/main/security-device-cgroups-serial-port/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/security-device-cgroups-serial-port/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,8 +1,14 @@ summary: Ensure that the device cgroup works properly for serial-port. +details: | + This test checks the when an udev rule assigning a serial port device to a snap + is added the tag showing the device is assigned to the snap is added and also it + is shown in the snap device list + systems: - #TODO: bpftool is not available on core22 and tests.device-cgroup needs it for cgroups v2 + #TODO: bpftool is not available on core22+ and tests.device-cgroup needs it for cgroups v2 - -ubuntu-core-22-* + - -ubuntu-core-24-* prepare: | # create serial devices if they don't exist diff -Nru snapd-2.62+23.10/tests/main/security-device-cgroups-strict-enforced/task.yaml snapd-2.63+23.10/tests/main/security-device-cgroups-strict-enforced/task.yaml --- snapd-2.62+23.10/tests/main/security-device-cgroups-strict-enforced/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/security-device-cgroups-strict-enforced/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -4,8 +4,9 @@ The test verifies that device cgroup control is enforced for tagged devices. 
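The cgroup v1 versus v2 split that the device-cgroup hunks above keep checking can be captured in one small helper. This is an editorial sketch built only from the paths asserted in those tests (is_cgroupv2 comes from tests/lib/systems.sh, sourced the same way the test does); no such helper exists in the tree:

    #shellcheck source=tests/lib/systems.sh
    . "$TESTSLIB"/systems.sh

    # Report where snapd keeps per-app device-cgroup state: a pinned BPF map
    # on cgroup v2 hosts, a devices-controller directory on cgroup v1 hosts.
    snap_device_cgroup_state() {
        local snap="$1" app="$2"
        if is_cgroupv2; then
            echo "/sys/fs/bpf/snap/snap_${snap}_${app}"
        else
            echo "/sys/fs/cgroup/devices/snap.${snap}.${app}"
        fi
    }

    snap_device_cgroup_state test-snapd-sh-core24 sh
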
systems: - #TODO: bpftool is not available on core22 and tests.device-cgroup needs it for cgroups v2 + #TODO: bpftool is not available on core22+ and tests.device-cgroup needs it for cgroups v2 - -ubuntu-core-22-* + - -ubuntu-core-24-* environment: # note that /dev/full has well known major:minor which is 1:7 @@ -25,7 +26,7 @@ echo "$content" > /etc/udev/rules.d/70-snap.test-strict-cgroup.rules libexecdir=$(os.paths libexec-dir) # populate a RUN rule like the one snapd adds for snap apps - content="TAG==\"snap_test-strict-cgroup_sh\" RUN+=\"$libexecdir/snapd/snap-device-helper snap_test-strict-cgroup_sh\"" + content="TAG==\"snap_test-strict-cgroup_sh\" RUN+=\"$libexecdir/snapd/snap-device-helper \$env{ACTION} snap_test-strict-cgroup_sh \$devpath \$major:\$minor\"" echo "$content" >> /etc/udev/rules.d/70-snap.test-strict-cgroup.rules udevadm control --reload-rules udevadm settle diff -Nru snapd-2.62+23.10/tests/main/security-profiles/task.yaml snapd-2.63+23.10/tests/main/security-profiles/task.yaml --- snapd-2.62+23.10/tests/main/security-profiles/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/security-profiles/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,5 +1,9 @@ summary: Check security profile generation for apps and hooks. +details: | + This test verifies that profiles are properly generated and loaded for + a set of apps and hooks. + prepare: | snap pack "$TESTSLIB"/snaps/basic-hooks @@ -17,7 +21,7 @@ for profile in snap.test-snapd-tools.block snap.test-snapd-tools.cat snap.test-snapd-tools.echo snap.test-snapd-tools.fail snap.test-snapd-tools.success do MATCH "^${profile} \\(enforce\\)$" <<<"$loaded_profiles" - [ -f "$seccomp_profile_directory/${profile}.bin" ] + [ -f "$seccomp_profile_directory/${profile}.bin2" ] done echo "Security profiles are generated and loaded for hooks" @@ -25,4 +29,4 @@ loaded_profiles=$(cat /sys/kernel/security/apparmor/profiles) echo "$loaded_profiles" | MATCH '^snap.basic-hooks.hook.configure \(enforce\)$' - [ -f "$seccomp_profile_directory/snap.basic-hooks.hook.configure.bin" ] + [ -f "$seccomp_profile_directory/snap.basic-hooks.hook.configure.bin2" ] diff -Nru snapd-2.62+23.10/tests/main/security-seccomp/task.yaml snapd-2.63+23.10/tests/main/security-seccomp/task.yaml --- snapd-2.62+23.10/tests/main/security-seccomp/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/security-seccomp/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -25,7 +25,7 @@ environment: SRC: /var/lib/snapd/seccomp/bpf/snap.test-snapd-setpriority.test-snapd-setpriority.src - BIN: /var/lib/snapd/seccomp/bpf/snap.test-snapd-setpriority.test-snapd-setpriority.bin + BIN: /var/lib/snapd/seccomp/bpf/snap.test-snapd-setpriority.test-snapd-setpriority.bin2 AAP: /var/lib/snapd/apparmor/profiles/snap.test-snapd-setpriority.test-snapd-setpriority prepare: | @@ -93,10 +93,16 @@ echo "and check that negative nice fails" test-snapd-setpriority -10 | MATCH 'Operation not permitted \(EPERM\)' - echo "Explicitly deny arg filtered setpriority rule" - sed 's/^\(setpriority.*\)/~\1/g' "$SRC".orig > "$SRC" + # TODO: filtering on setpriority is a bit confusing as it is not part + # of the "negative args" filter added in ec7c9f27c97 so the fact that + # negative args are denied is a bit magic + echo "Explicitly deny arg filtered setpriority rule takes precedence to (allow) arg filtered setpriority rule" + sed 's/^\(setpriority.*\)/#SPREAD: \1\nsetpriority PRIO_PROCESS 0 <=19/g' "$SRC".orig > "$SRC" + echo '~setpriority PRIO_PROCESS 0 >10' >> 
"$SRC" snapd.tool exec snap-seccomp compile "$SRC" "$BIN" - echo "and check that positive nice fails with explicit denial" - test-snapd-setpriority 10 | MATCH 'Insufficient privileges \(EACCES\)' + echo "and check that positive non-explicitly denied nice succeeds" + test-snapd-setpriority 10 | MATCH 'Successfully used setpriority\(PRIO_PROCESS, 0, 10\)' + echo "and check that explicitly denied parameters fail with the explicit denial error code" + test-snapd-setpriority 11 | MATCH 'Insufficient privileges \(EACCES\)' echo "and check that negative nice still fails with implicit denial" test-snapd-setpriority -10 | MATCH 'Operation not permitted \(EPERM\)' diff -Nru snapd-2.62+23.10/tests/main/services-user/task.yaml snapd-2.63+23.10/tests/main/services-user/task.yaml --- snapd-2.62+23.10/tests/main/services-user/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/services-user/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -37,6 +37,7 @@ userdel --extrausers -rf test2 fi snap unset system experimental.user-daemons + rm -f /etc/polkit-1/localauthority/50-local.d/spread.pkla debug: | tests.session dump @@ -55,14 +56,54 @@ systemctl status snap.test-snapd-user-service.svc3.service | MATCH "running" systemctl status snap.test-snapd-user-service.svc4.service | MATCH "running" + echo "(root) Verifying what snap services is reporting" + snap services | cat -n > services-root.txt + MATCH " 1\s+Service\s+Startup\s+Current\s+Notes$" < services-root.txt + MATCH " 2\s+test-snapd-user-service.svc1\s+enabled\s+-\s+user$" < services-root.txt + MATCH " 3\s+test-snapd-user-service.svc2\s+enabled\s+-\s+user$" < services-root.txt + MATCH " 4\s+test-snapd-user-service.svc3\s+enabled\s+active\s+-$" < services-root.txt + MATCH " 5\s+test-snapd-user-service.svc4\s+enabled\s+active\s+-$" < services-root.txt + + echo "(root) Verifying what snap services is reporting with --user" + snap services --user | cat -n > services-root-user.txt + MATCH " 1\s+Service\s+Startup\s+Current\s+Notes$" < services-root-user.txt + MATCH " 2\s+test-snapd-user-service.svc1\s+enabled\s+active\s+user$" < services-root-user.txt + MATCH " 3\s+test-snapd-user-service.svc2\s+enabled\s+active\s+user$" < services-root-user.txt + MATCH " 4\s+test-snapd-user-service.svc3\s+enabled\s+active\s+-$" < services-root-user.txt + MATCH " 5\s+test-snapd-user-service.svc4\s+enabled\s+active\s+-$" < services-root-user.txt + echo "(user test) We can see the user services running" tests.session -u test exec systemctl --user is-active snap.test-snapd-user-service.svc1.service | MATCH "active" tests.session -u test exec systemctl --user is-active snap.test-snapd-user-service.svc2.service | MATCH "active" + echo "(user test) Verifying what snap services is reporting" + tests.session -u test exec snap services | cat -n > services-user.txt + MATCH " 1\s+Service\s+Startup\s+Current\s+Notes$" < services-user.txt + MATCH " 2\s+test-snapd-user-service.svc1\s+enabled\s+active\s+user$" < services-user.txt + MATCH " 3\s+test-snapd-user-service.svc2\s+enabled\s+active\s+user$" < services-user.txt + MATCH " 4\s+test-snapd-user-service.svc3\s+enabled\s+active\s+-$" < services-user.txt + MATCH " 5\s+test-snapd-user-service.svc4\s+enabled\s+active\s+-$" < services-user.txt + echo "(user test2) We can see the user services running" tests.session -u test2 exec systemctl --user is-active snap.test-snapd-user-service.svc1.service | MATCH "active" tests.session -u test2 exec systemctl --user is-active snap.test-snapd-user-service.svc2.service | MATCH 
"active" + echo "(user test2) Verifying what snap services is reporting" + tests.session -u test2 exec snap services | cat -n > services-user2.txt + MATCH " 1\s+Service\s+Startup\s+Current\s+Notes$" < services-user2.txt + MATCH " 2\s+test-snapd-user-service.svc1\s+enabled\s+active\s+user$" < services-user2.txt + MATCH " 3\s+test-snapd-user-service.svc2\s+enabled\s+active\s+user$" < services-user2.txt + MATCH " 4\s+test-snapd-user-service.svc3\s+enabled\s+active\s+-$" < services-user2.txt + MATCH " 5\s+test-snapd-user-service.svc4\s+enabled\s+active\s+-$" < services-user2.txt + + # when --global is passed we should see identical output to the one from root + tests.session -u test exec snap services --global | cat -n > services-user-global.txt + MATCH " 1\s+Service\s+Startup\s+Current\s+Notes$" < services-user-global.txt + MATCH " 2\s+test-snapd-user-service.svc1\s+enabled\s+-\s+user$" < services-user-global.txt + MATCH " 3\s+test-snapd-user-service.svc2\s+enabled\s+-\s+user$" < services-user-global.txt + MATCH " 4\s+test-snapd-user-service.svc3\s+enabled\s+active\s+-$" < services-user-global.txt + MATCH " 5\s+test-snapd-user-service.svc4\s+enabled\s+active\s+-$" < services-user-global.txt + # if root is making this request, implied scopes are all echo "(root) Stopping all services for snap" snap stop test-snapd-user-service diff -Nru snapd-2.62+23.10/tests/main/snap-run-inhibition-flow/api-client/bin/api-client.py snapd-2.63+23.10/tests/main/snap-run-inhibition-flow/api-client/bin/api-client.py --- snapd-2.62+23.10/tests/main/snap-run-inhibition-flow/api-client/bin/api-client.py 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/main/snap-run-inhibition-flow/api-client/bin/api-client.py 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,40 @@ +#!/usr/bin/python3 + +import argparse +import http.client +import sys +import socket + +class UnixSocketHTTPConnection(http.client.HTTPConnection): + def __init__(self, socket_path): + super().__init__('localhost') + self._socket_path = socket_path + + def connect(self): + s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + s.connect(self._socket_path) + self.sock = s + + +def main(argv): + parser = argparse.ArgumentParser('Call the snapd REST API') + parser.add_argument('--socket', default='/run/snapd.socket', + help='The socket path to connect to') + parser.add_argument('--method', default='GET', + help='The HTTP method to use') + parser.add_argument('path', metavar='PATH', + help='The HTTP path to request') + parser.add_argument('body', metavar='BODY', default=None, nargs='?', + help='The HTTP request body') + args = parser.parse_args(argv[1:]) + + conn = UnixSocketHTTPConnection(args.socket) + conn.request(args.method, args.path, args.body) + + response = conn.getresponse() + body = response.read() + print(body.decode('UTF-8')) + return response.status >= 300 + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff -Nru snapd-2.62+23.10/tests/main/snap-run-inhibition-flow/api-client/meta/snap.yaml snapd-2.63+23.10/tests/main/snap-run-inhibition-flow/api-client/meta/snap.yaml --- snapd-2.62+23.10/tests/main/snap-run-inhibition-flow/api-client/meta/snap.yaml 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/main/snap-run-inhibition-flow/api-client/meta/snap.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,8 @@ +name: api-client +version: 1 +base: core18 +apps: + api-client: + command: bin/api-client.py + plugs: + - snap-refresh-observe diff -Nru snapd-2.62+23.10/tests/main/snap-run-inhibition-flow/task.yaml 
snapd-2.63+23.10/tests/main/snap-run-inhibition-flow/task.yaml --- snapd-2.62+23.10/tests/main/snap-run-inhibition-flow/task.yaml 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/main/snap-run-inhibition-flow/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,56 @@ +summary: Check that snap run notifies the user about run inhibition due to refreshes. + +details: | + This test exercises the inhibition flow triggered when snap run is + inhibited from running due to an onging refresh. When snap run is inhibited + it record a snap-run-inhibit notice which should be parsed by another + client (e.g. snapd-desktop-integration snap). + + TODO: Add a check for the text fallback + If snap run detects that no snap has the marker interface connected and + we are running in a terminal then snap run falls back to showing a text + notification. + +environment: + SNAPD_INHIBIT_DIR: "/var/lib/snapd/inhibit" + +prepare: | + snap install --edge jq + + echo "Install snap with marker snap-refresh-observe interface connected" + "$TESTSTOOLS"/snaps-state install-local api-client + snap connect api-client:snap-refresh-observe + + # Make sure inhibit dir exists + mkdir -p $SNAPD_INHIBIT_DIR + + # Mock test-snapd-tools snap as inhibited due to refresh + snap install test-snapd-tools + SNAP_MOUNT_DIR="$(os.paths snap-mount-dir)" + REVNO="$(readlink "$SNAP_MOUNT_DIR"/test-snapd-tools/current)" + echo -n "refresh" > $SNAPD_INHIBIT_DIR/test-snapd-tools.lock + echo -n '{"previous":"'"${REVNO}"'"}' > $SNAPD_INHIBIT_DIR/test-snapd-tools.refresh + +restore: | + rm -f $SNAPD_INHIBIT_DIR/test-snapd-tools.lock + rm -f $SNAPD_INHIBIT_DIR/test-snapd-tools.refresh + snap remove --purge test-snapd-tools + + snap remove --purge api-client + snap remove --purge jq + +execute: | + echo "Try running inhibited snap" + touch output + test-snapd-tools.echo hi > output 2>&1 & + echo "Command is waiting due to inhibition, no output" + NOMATCH "hi" < output + + # Notice is recorded for inhibition + api-client --socket /run/snapd-snap.socket "/v2/notices?types=snap-run-inhibit&keys=test-snapd-tools" | jq '.result[0].occurrences' | MATCH '^1$' + + echo "Mark snap as no longer inhibited" + echo -n "" > $SNAPD_INHIBIT_DIR/test-snapd-tools.lock + echo "snap is no longer inhibited, command should run now" + sleep 1 + MATCH "hi" < output diff -Nru snapd-2.62+23.10/tests/main/snap-run-symlink-error/task.yaml snapd-2.63+23.10/tests/main/snap-run-symlink-error/task.yaml --- snapd-2.62+23.10/tests/main/snap-run-symlink-error/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/snap-run-symlink-error/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,4 +1,11 @@ -summary: Check error handling in symlinks to /usr/bin/snap +summary: Check error handling in symlinks to /usr/bin/snap" + +details: | + Check that missing current symlinks are detected by snap run and + does not cause an infinite retry loop. + +environment: + SNAPD_DEBUG: "1" restore: | SNAP_MOUNT_DIR="$(os.paths snap-mount-dir)" @@ -11,14 +18,14 @@ ln -s /usr/bin/snap "$SNAP_MOUNT_DIR/bin/xxx" echo Running unknown command - output="$("$SNAP_MOUNT_DIR/bin/xxx" 2>&1 )" && exit 1 + "$SNAP_MOUNT_DIR/bin/xxx" > output.txt 2>&1 && exit 1 err=$? 
- echo "$output" + cat output.txt echo Verifying error message if [[ $err -ne 46 ]]; then echo "expected error code 46 but got $err" exit 1 fi - expected="internal error, please report: running \"xxx\" failed: cannot find current revision for snap xxx: readlink $SNAP_MOUNT_DIR/xxx/current: no such file or directory" - test "$output" = "$expected" + MATCH "internal error, please report: running \"xxx\" failed: race condition detected, snap-run can only retry once" < output.txt + MATCH "cannot find current revision for snap xxx: readlink $SNAP_MOUNT_DIR/xxx/current: no such file or directory" < output.txt diff -Nru snapd-2.62+23.10/tests/main/snap-seccomp/task.yaml snapd-2.63+23.10/tests/main/snap-seccomp/task.yaml --- snapd-2.62+23.10/tests/main/snap-seccomp/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/main/snap-seccomp/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,8 +1,14 @@ summary: Ensure that the snap-seccomp bpf handling works -# FIXME: once $(snap debug confinment) can be used (in 2.27+) remove -# the systems line -systems: [ubuntu-16*, ubuntu-18*] +details: | + This test installs the test-snapd-sh snap and runs different checks + to validate that snap-seccomp bpf handling works as expected. Those + checks include the use of @complain and @unrestricted keywords, + missing, empty and invalid profiles, checks the filter size limit + and ensures that that snap-confine waits for security profiles to + appear. It also verifies that amd64 arch works with i386 binaries. + +systems: [ubuntu-*] # Start early as it takes a long time. priority: 100 @@ -16,26 +22,30 @@ snap install test-snapd-sh test-snapd-sh.sh -c 'echo hello' | MATCH hello - # FIXME: use dirs.sh in 2.27+ - echo "Ensure snap-seccomp is statically linked" - if ldd /usr/lib/snapd/snap-seccomp | MATCH libseccomp ; then - echo "found dynamically linked libseccomp, we need a staticly linked one" - exit 1 + # The 16.04 build is static as it goes into the snapd snap but 14.04 + # is dynamically linked. + if ! 
os.query is-trusty; then + echo "Ensure snap-seccomp is statically linked" + # FIXME: use dirs.sh in 2.27+ + if ldd /usr/lib/snapd/snap-seccomp | MATCH libseccomp ; then + echo "found dynamically linked libseccomp, we need a staticly linked one" + exit 1 + fi fi # from the old test_complain echo "Test that the @complain keyword works" - rm -f "${PROFILE}.bin" + rm -f "${PROFILE}.bin2" cat >"${PROFILE}.src" <"${PROFILE}.src" <"${PROFILE}.src" <"${PROFILE}.src" <&1 ); then echo "test-snapd-sh.sh should fail with invalid seccomp profile" exit 1 fi - echo "$output" | MATCH "cannot apply seccomp profile: Invalid argument" + echo "$output" | MATCH "unexpected seccomp header: .*" - echo "Add huge snapd.test-snapd-sh.bin to ensure size limit works" - dd if=/dev/zero of="${PROFILE}.bin" count=50 bs=1M + echo "Add huge snapd.test-snapd-sh filters to ensure size limit works" + dd if=/dev/zero of="${PROFILE}.bin2" count=50 bs=1M if output=$(test-snapd-sh.sh -c 'echo hello' 2>&1 ); then echo "test-snapd-sh.sh should fail with big seccomp profile" exit 1 fi - echo "$output" | MATCH "cannot fit .* to memory buffer" + # TODO: adjust the test so that the header is valid and the profile big + #echo "$output" | MATCH "cannot fit .* to memory buffer" - echo "Ensure the code cannot not run with a missing .bin profile" - rm -f "${PROFILE}.bin" + echo "Ensure the code cannot not run with a missing filter profile" + rm -f "${PROFILE}.bin2" if test-snapd-sh.sh -c 'echo hello'; then echo "filtering broken: program should have failed to run" exit 1 fi echo "Ensure the code cannot not run with an empty seccomp profile" - rm -f "${PROFILE}.bin" + rm -f "${PROFILE}.bin2" echo "" > "${PROFILE}.src" - $SNAP_SECCOMP compile "${PROFILE}.src" "${PROFILE}.bin" + $SNAP_SECCOMP compile "${PROFILE}.src" "${PROFILE}.bin2" if test-snapd-sh.sh -c 'echo hello'; then echo "filtering broken: program should have failed to run" exit 1 fi echo "Ensure snap-confine waits for security profiles to appear" - rm -f "${PROFILE}.bin" + rm -f "${PROFILE}.bin2" cat >"${PROFILE}.src" < stderr.out; then - INSTALLED=$(( INSTALLED + 1 )) - else - # this could cause the failure https://bugs.launchpad.net/snapstore-server/+bug/2049071 - MATCH "error: snap \"$SNAP\" not found" < stderr.out - fi - - if [ "$INSTALLED" = "$NUM_SNAPS" ]; then - echo "already $NUM_SNAPS installed, now check other features" - break - fi - done - done - - snap refresh - snap services - snap list diff -Nru snapd-2.62+23.10/tests/perf/interfaces-core-provided/task.yaml snapd-2.63+23.10/tests/perf/interfaces-core-provided/task.yaml --- snapd-2.62+23.10/tests/perf/interfaces-core-provided/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/interfaces-core-provided/task.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,124 +0,0 @@ -summary: Ensure the system handles properly a big number of core provided connections - -details: | - Install a test snap that plugs as many core provided interfaces as is - possible and verify the command can run. This will help catch performance - issues in snapd, AppArmor,seccomp policy parsing, etc. - -environment: - CONSUMER_SNAP: test-snapd-policy-app-consumer - -prepare: | - echo "Given a snap is installed" - "$TESTSTOOLS"/snaps-state install-local "$CONSUMER_SNAP" - - # If possible, prepare a session for the test user. On many systems this - # will allow running all tests as the unprivileged user. 
This shields us - # from accidentally triggering any additional processes from run in the - # session of the root user and stay behind after this test terminates. - if tests.session has-session-systemd-and-dbus; then - tests.session -u test prepare - fi - -restore: | - # Remove the snaps to avoid timeout in next test - if tests.session has-session-systemd-and-dbus; then - tests.session -u test restore - fi - -execute: | - echo "For each core-provided slot" - SNAP_MOUNT_DIR="$(os.paths snap-mount-dir)" - for plugcmd in "$SNAP_MOUNT_DIR"/bin/"$CONSUMER_SNAP".* ; do - - # Just connect CONNECTIONS_PERCENTAGE of the interfaces on - # the current system - if [ -n "$CONNECTIONS_PERCENTAGE" ] && [ "$((RANDOM % (100 / CONNECTIONS_PERCENTAGE) ))" != 0 ]; then - echo "skipping plug: $plugcmd" - continue - fi - - plugcmd_bn=$(basename "$plugcmd") - plug_iface=$(echo "$plugcmd_bn" | tr '.' ':') - #shellcheck disable=SC2001 - slot_iface=$(echo "$plug_iface" | sed "s/$CONSUMER_SNAP//") - - # we test browser-support two different ways, so account for that - if [ "$plug_iface" = "$CONSUMER_SNAP:browser-sandbox" ]; then - slot_iface=":browser-support" - fi - - CONNECTED_PATTERN="$slot_iface +.*$CONSUMER_SNAP" - DISCONNECTED_PATTERN="$slot_iface +-" - - # Skip any interfaces that core doesn't ship - if ! snap interfaces | grep -E -q "$slot_iface +"; then - echo "$slot_iface not present, skipping" - continue - fi - - if [ "$plug_iface" = "$CONSUMER_SNAP:qualcomm-ipc-router" ] && ( os.query is-trusty || os.query is-xenial || os.query is-core16) ; then - # the qualcomm-ipc-router interface is known not to work on xenial, - # just check that it cannot be connected and move on - snap connect "$plug_iface" "$slot_iface" 2>&1 | MATCH "cannot connect plug on system without qipcrtr socket support" - continue - fi - - if [ "$plug_iface" = "$CONSUMER_SNAP:mount-control" ] && os.query is-trusty ; then - # systemd version is too old, skipping - snap connect "$plug_iface" "$slot_iface" 2>&1 | MATCH "systemd version 204 is too old \\(expected at least 209\\)" - continue - fi - - # The netlink-audit interface adds the `audit_read` capability to the - # AppArmor profile, but that's not supported on some older systems - if [ "$plug_iface" = "$CONSUMER_SNAP:netlink-audit" ] && os.query is-trusty; then - snap connect "$plug_iface" "$slot_iface" 2>&1 | MATCH "cannot connect plug on system without audit_read support" - continue - fi - - echo "When slot $slot_iface is connected" - if snap interfaces | grep -E -q "$DISCONNECTED_PATTERN"; then - if [ "$slot_iface" = ":broadcom-asic-control" ] || [ "$slot_iface" = ":firewall-control" ] || [ "$slot_iface" = ":kubernetes-support" ] || [ "$slot_iface" = ":microstack-support" ] || [ "$slot_iface" = ":openvswitch-support" ] || [ "$slot_iface" = ":ppp" ]; then - # TODO: when the kmod backend no longer fails on missing - # modules, we can remove this - snap connect "$plug_iface" "$slot_iface" || true - else - snap connect "$plug_iface" "$slot_iface" - fi - fi - snap interfaces | MATCH "$CONNECTED_PATTERN" - - echo "Then $plugcmd should succeed" - if tests.session has-session-systemd-and-dbus; then - tests.session -u test exec "$plugcmd" | MATCH PASS - else - # If we cannot run the plug command as the test user, in the - # relative safety of the user session which gets torn down, then - # run the test directly EXCEPT when testing the desktop interface. - # - # The desktop interface causes, at minimum, XDG document portal to - # activate in the root users's session, which is not cleaned up. 
- # Since that interface will only be used in a real session, leaving - # it out is acceptable. - if [ "$plugcmd" != "${CONSUMER_SNAP}.desktop" ]; then - "$plugcmd" | MATCH PASS - else - echo "skipping $plugcmd on an unsupported system" - fi - fi - - echo "Finally disconnect the interface" - if [ "$DISCONNECT_INTERFACES" == true ] && snap interfaces | grep -E -q "$CONNECTED_PATTERN"; then - if [ "$plug_iface" = "$CONSUMER_SNAP:browser-sandbox" ]; then - snap disconnect "$CONSUMER_SNAP:browser-support" "$slot_iface" - else - snap disconnect "$plug_iface" "$slot_iface" - fi - fi - done - - echo "Removing the consumer snap" - # When DISCONNECT_INTERFACES = false, then all the interfaces are connected and - # are disconnected suring the snap removal - snap remove --purge "$CONSUMER_SNAP" diff -Nru snapd-2.62+23.10/tests/perf/interfaces-snap-provided/task.yaml snapd-2.63+23.10/tests/perf/interfaces-snap-provided/task.yaml --- snapd-2.62+23.10/tests/perf/interfaces-snap-provided/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/interfaces-snap-provided/task.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -summary: Ensure the system handles properly a big number of snap provided connections - -details: | - Install a test snap that plugs as many snap provided interfaces as is - possible and verify the command can run. This will help catch performance - issues in snapd, AppArmor,seccomp policy parsing, etc. - -environment: - CONSUMER_SNAP: test-snapd-policy-app-consumer - -execute: | - PROVIDER_SNAP="test-snapd-policy-app-provider-classic" - if os.query is-core; then - PROVIDER_SNAP="test-snapd-policy-app-provider-core" - fi - - echo "Given a snap is installed" - "$TESTSTOOLS"/snaps-state install-local "$PROVIDER_SNAP" - "$TESTSTOOLS"/snaps-state install-local "$CONSUMER_SNAP" - - echo "For each snap-provided slot from $PROVIDER_SNAP" - SNAP_MOUNT_DIR="$(os.paths snap-mount-dir)" - for slotcmd in "$SNAP_MOUNT_DIR"/bin/"$PROVIDER_SNAP".* ; do - slotcmd_bn=$(basename "$slotcmd") - slot_iface=$(echo "$slotcmd_bn" | tr '.' ':') - - #shellcheck disable=SC2001 - plugcmd=$(echo "$slotcmd" | sed "s/$PROVIDER_SNAP/$CONSUMER_SNAP/") - plugcmd_bn=$(basename "$plugcmd") - plug_iface=$(echo "$plugcmd_bn" | tr '.' 
':') - - CONNECTED_PATTERN="$slot_iface +$CONSUMER_SNAP" - - echo "When slot $slot_iface is connected" - snap connect "$plug_iface" "$slot_iface" - snap interfaces | MATCH "$CONNECTED_PATTERN" - - echo "Then $slotcmd should succeed" - "$slotcmd" | MATCH PASS - - echo "Then $plugcmd should succeed" - "$plugcmd" | MATCH PASS - - echo "Finally disconnect the interface" - if [ "$DISCONNECT_INTERFACES" == true ]; then - snap disconnect "$plug_iface" "$slot_iface" - fi - done - - echo "Removing the consumer snap" - # When DISCONNECT_INTERFACES = false, then all the interfaces are connected and - # are disconnected suring the snap removal - PROVIDER_SNAP="test-snapd-policy-app-provider-classic" - if os.query is-core; then - PROVIDER_SNAP="test-snapd-policy-app-provider-core" - fi - snap remove --purge "$PROVIDER_SNAP" - snap remove --purge "$CONSUMER_SNAP" diff -Nru snapd-2.62+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-classic/bin/run snapd-2.63+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-classic/bin/run --- snapd-2.62+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-classic/bin/run 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-classic/bin/run 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -#!/bin/bash -set -e - -if [ -z "$1" ]; then - echo "PASS" - exit 0 -fi - -# Also allow calling the shell directly with options -/bin/bash --norc "$@" diff -Nru snapd-2.62+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-classic/meta/snap.yaml snapd-2.63+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-classic/meta/snap.yaml --- snapd-2.62+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-classic/meta/snap.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-classic/meta/snap.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,102 +0,0 @@ -name: test-snapd-policy-app-provider-classic -version: 1.0 -summary: Test policy app for providing slot interface policy from snapd -description: Test policy app for non-implicitOnClassic slots -confinement: strict - -slots: - content-read: - interface: content - content: test-content - read: - - $SNAP/content - dbus-session: - interface: dbus - bus: session - name: test.session - dbus-system: - interface: dbus - bus: system - name: test.system - location-control: null - location-observe: null - lxd: null - maliit: null - media-hub: null - microceph: null - microovn: null - mir: null - mpris: - name: test-policy-app-provider-classic - online-accounts-service: null - storage-framework-service: null - thumbnailer-service: null - ubuntu-download-manager: null - unity8: null - unity8-calendar: null - unity8-contacts: null - -apps: - content-read: - command: bin/run - slots: [ content-read ] - dbus-session: - command: bin/run - slots: [ dbus-session ] - dbus-system: - command: bin/run - slots: [ dbus-system ] - docker: - command: bin/run - slots: [ docker ] - fwupd: - command: bin/run - slots: [ fwupd ] - location-control: - command: bin/run - slots: [ location-control ] - location-observe: - command: bin/run - slots: [ location-observe ] - lxd: - command: bin/run - slots: [ lxd ] - maliit: - command: bin/run - slots: [ maliit ] - media-hub: - command: bin/run - slots: [ media-hub ] - microceph: - command: bin/run - slots: [ microceph ] - microovn: - command: bin/run - slots: [ 
microovn ] - mir: - command: bin/run - slots: [ mir ] - mpris: - command: bin/run - slots: [ mpris ] - online-accounts-service: - command: bin/run - slots: [ online-accounts-service ] - storage-framework-service: - command: bin/run - slots: [ storage-framework-service ] - thumbnailer-service: - command: bin/run - slots: [ thumbnailer-service ] - ubuntu-download-manager: - command: bin/run - slots: [ ubuntu-download-manager ] - unity8: - command: bin/run - slots: [ unity8 ] - unity8-calendar: - command: bin/run - slots: [ unity8-calendar ] - unity8-contacts: - command: bin/run - slots: [ unity8-contacts ] diff -Nru snapd-2.62+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-core/bin/run snapd-2.63+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-core/bin/run --- snapd-2.62+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-core/bin/run 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-core/bin/run 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -#!/bin/bash -set -e - -if [ -z "$1" ]; then - echo "PASS" - exit 0 -fi - -# Also allow calling the shell directly with options -/bin/bash --norc "$@" diff -Nru snapd-2.62+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-core/meta/snap.yaml snapd-2.63+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-core/meta/snap.yaml --- snapd-2.62+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-core/meta/snap.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/interfaces-snap-provided/test-snapd-policy-app-provider-core/meta/snap.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,164 +0,0 @@ -name: test-snapd-policy-app-provider-core -version: 1.0 -summary: Test policy app for providing slot interface policy from snapd -description: Test policy app for non-implicitOnCore slots -confinement: strict - -slots: - audio-playback: null - audio-record: null - avahi-control: null - avahi-observe: null - bluez: null - content-read: - interface: content - content: test-content - read: - - $SNAP/content - cups: null - cups-control: null - dbus-session: - interface: dbus - bus: session - name: test.session - dbus-system: - interface: dbus - bus: system - name: test.system - desktop: null - docker: null - fwupd: null - location-control: null - location-observe: null - lxd: null - maliit: null - media-hub: null - microceph: null - microovn: null - mir: null - modem-manager: null - network-manager: null - network-manager-observe: null - ofono: null - online-accounts-service: null - pulseaudio: null - storage-framework-service: null - thumbnailer-service: null - ubuntu-download-manager: null - udisks2: null - unity8: null - unity8-calendar: null - unity8-contacts: null - upower-observe: null - wayland: null - x11: null - -apps: - audio-playback: - command: bin/run - slots: [ audio-playback ] - audio-record: - command: bin/run - slots: [ audio-record ] - avahi-control: - command: bin/run - slots: [ avahi-control ] - avahi-observe: - command: bin/run - slots: [ avahi-observe ] - bluez: - command: bin/run - slots: [ bluez ] - content-read: - command: bin/run - slots: [ content-read ] - cups: - command: bin/run - slots: [ cups ] - cups-control: - command: bin/run - slots: [ cups-control ] - dbus-session: - command: bin/run - slots: [ dbus-session ] - dbus-system: - command: bin/run - slots: [ dbus-system ] - docker: - command: bin/run - slots: [ 
docker ] - fwupd: - command: bin/run - slots: [ fwupd ] - location-control: - command: bin/run - slots: [ location-control ] - location-observe: - command: bin/run - slots: [ location-observe ] - lxd: - command: bin/run - slots: [ lxd ] - maliit: - command: bin/run - slots: [ maliit ] - media-hub: - command: bin/run - slots: [ media-hub ] - microceph: - command: bin/run - slots: [ microceph ] - microovn: - command: bin/run - slots: [ microovn ] - mir: - command: bin/run - slots: [ mir ] - modem-manager: - command: bin/run - slots: [ modem-manager ] - network-manager: - command: bin/run - slots: [ network-manager ] - network-manager-observe: - command: bin/run - slots: [ network-manager-observe ] - ofono: - command: bin/run - slots: [ ofono ] - online-accounts-service: - command: bin/run - slots: [ online-accounts-service ] - pulseaudio: - command: bin/run - slots: [ pulseaudio ] - storage-framework-service: - command: bin/run - slots: [ storage-framework-service ] - thumbnailer-service: - command: bin/run - slots: [ thumbnailer-service ] - ubuntu-download-manager: - command: bin/run - slots: [ ubuntu-download-manager ] - udisks2: - command: bin/run - slots: [ udisks2 ] - unity8: - command: bin/run - slots: [ unity8 ] - unity8-calendar: - command: bin/run - slots: [ unity8-calendar ] - unity8-contacts: - command: bin/run - slots: [ unity8-contacts ] - upower-observe: - command: bin/run - slots: [ upower-observe ] - wayland: - command: bin/run - slots: [ wayland ] - x11: - command: bin/run - slots: [ x11 ] diff -Nru snapd-2.62+23.10/tests/perf/main/install-many-snaps/task.yaml snapd-2.63+23.10/tests/perf/main/install-many-snaps/task.yaml --- snapd-2.62+23.10/tests/perf/main/install-many-snaps/task.yaml 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/main/install-many-snaps/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,74 @@ +summary: Ensure the system handles properly a big number of different installed snaps + +details: | + Install different snaps many times based on arch availability. + This will help catch performance issues in snapd, AppArmor, etc. + +warn-timeout: 5m +kill-timeout: 90m + +execute: | + if [ -z "$NUM_SNAPS" ]; then + NUM_SNAPS=100 + fi + + LETTERS="$(echo {a..z})" + INSTALLED=0 + CHANNEL='stable' + + # shellcheck disable=SC2086 + for letter in $LETTERS; do + if [ "$INSTALLED" = "$NUM_SNAPS" ]; then + echo "already $NUM_SNAPS installed, now check other features" + break + fi + + snaps="$(snap find --narrow "$letter")" + SNAP_NAMES="$(echo "$snaps" | awk '{if($4~/-/){print $1}}' | tail -n+2)" + for SNAP in $SNAP_NAMES; do + # Get the info from latest/$CHANNEL + # shellcheck disable=SC2153 + if ! 
CHANNEL_INFO="$(snap info --unicode=never "$SNAP" | grep " latest/$CHANNEL: ")"; then + echo "Snap $SNAP not found" + continue + fi + PARAMS="" + if echo "$CHANNEL_INFO" | MATCH "$CHANNEL:.*-$"; then + snap install --no-wait "$SNAP" "--$CHANNEL" + elif echo "$CHANNEL_INFO" | MATCH "$CHANNEL:.*classic$"; then + if "$TESTSTOOLS"/snaps-state is-confinement-supported classic; then + PARAMS="--classic" + else + echo "The snap $SNAP requires classic confinement which is not supported yet" + continue + fi + elif echo "$CHANNEL_INFO" | MATCH "$CHANNEL:.*jailmode$"; then + PARAMS="--jailmode" + elif echo "$CHANNEL_INFO" | MATCH "$CHANNEL:.*devmode$"; then + PARAMS="--devmode" + else + echo "Channel info not proccessed correctly: $CHANNEL_INFO" + continue + fi + + if snap install --no-wait "$SNAP" --$CHANNEL $PARAMS 2> stderr.out; then + INSTALLED=$(( INSTALLED + 1 )) + else + # this could cause the failure https://bugs.launchpad.net/snapstore-server/+bug/2049071 + MATCH "error: snap \"$SNAP\" not found" < stderr.out + fi + + if [ "$INSTALLED" = "$NUM_SNAPS" ]; then + echo "already $NUM_SNAPS installed, now check other features" + break + fi + done + done + + while snap changes | MATCH " (Do |Doing ).*Install \".*\" snap"; do + sleep 1 + done + + snap refresh + snap services + snap list diff -Nru snapd-2.62+23.10/tests/perf/main/install-many-snaps-no-wait/task.yaml snapd-2.63+23.10/tests/perf/main/install-many-snaps-no-wait/task.yaml --- snapd-2.62+23.10/tests/perf/main/install-many-snaps-no-wait/task.yaml 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/main/install-many-snaps-no-wait/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,59 @@ +summary: Ensure the system properly handles installing many snaps with the --no-wait flag + +details: | + Install different snaps many times based on arch availability. In this + scenario all the snaps are installed in parallel and the test checks + the system can handel all the installs and ramains running. + +kill-timeout: 30m + +execute: | + if [ -z "$NUM_SNAPS" ]; then + NUM_SNAPS=20 + fi + + LETTERS="$(echo {a..z})" + INCLUDED=0 + CHANNEL="stable" + SNAP_LIST="" + + # shellcheck disable=SC2086 + for letter in $LETTERS; do + if [ "$INCLUDED" = "$NUM_SNAPS" ]; then + echo "already $NUM_SNAPS ready to install, now check other features" + break + fi + + snaps="$(snap find --narrow "$letter")" + SNAP_NAMES="$(echo "$snaps" | awk '{if($4~/-/){print $1}}' | tail -n+2)" + for SNAP in $SNAP_NAMES; do + # Get the info from latest/$CHANNEL + # shellcheck disable=SC2153 + if ! 
CHANNEL_INFO="$(snap info --unicode=never "$SNAP" | grep " latest/$CHANNEL: ")"; then + echo "Snap $SNAP not found" + continue + fi + if echo "$CHANNEL_INFO" | MATCH "$CHANNEL:.*-$"; then + SNAP_LIST="$SNAP_LIST $SNAP" + INCLUDED=$(( INCLUDED + 1 )) + fi + + if [ "$INCLUDED" = "$NUM_SNAPS" ]; then + echo "already $NUM_SNAPS included in the list" + break + fi + done + done + + echo "Installing snaps: $SNAP_LIST" + # shellcheck disable=SC2086 + for SNAP in $SNAP_LIST; do + snap install --no-wait "--$CHANNEL" "$SNAP" + done + + while snap changes | MATCH " (Do |Doing ).*Install \".*\" snap"; do + sleep 1 + done + + test "$(snap changes | grep -cE ' Done.*Install \".*\" snap')" -ge "$NUM_SNAPS" + systemctl is-active snapd diff -Nru snapd-2.62+23.10/tests/perf/main/interfaces-core-provided/task.yaml snapd-2.63+23.10/tests/perf/main/interfaces-core-provided/task.yaml --- snapd-2.62+23.10/tests/perf/main/interfaces-core-provided/task.yaml 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/main/interfaces-core-provided/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,124 @@ +summary: Ensure the system handles properly a big number of core provided connections + +details: | + Install a test snap that plugs as many core provided interfaces as is + possible and verify the command can run. This will help catch performance + issues in snapd, AppArmor,seccomp policy parsing, etc. + +environment: + CONSUMER_SNAP: test-snapd-policy-app-consumer + +prepare: | + echo "Given a snap is installed" + "$TESTSTOOLS"/snaps-state install-local "$CONSUMER_SNAP" + + # If possible, prepare a session for the test user. On many systems this + # will allow running all tests as the unprivileged user. This shields us + # from accidentally triggering any additional processes from run in the + # session of the root user and stay behind after this test terminates. + if tests.session has-session-systemd-and-dbus; then + tests.session -u test prepare + fi + +restore: | + # Remove the snaps to avoid timeout in next test + if tests.session has-session-systemd-and-dbus; then + tests.session -u test restore + fi + +execute: | + echo "For each core-provided slot" + SNAP_MOUNT_DIR="$(os.paths snap-mount-dir)" + for plugcmd in "$SNAP_MOUNT_DIR"/bin/"$CONSUMER_SNAP".* ; do + + # Just connect CONNECTIONS_PERCENTAGE of the interfaces on + # the current system + if [ -n "$CONNECTIONS_PERCENTAGE" ] && [ "$((RANDOM % (100 / CONNECTIONS_PERCENTAGE) ))" != 0 ]; then + echo "skipping plug: $plugcmd" + continue + fi + + plugcmd_bn=$(basename "$plugcmd") + plug_iface=$(echo "$plugcmd_bn" | tr '.' ':') + #shellcheck disable=SC2001 + slot_iface=$(echo "$plug_iface" | sed "s/$CONSUMER_SNAP//") + + # we test browser-support two different ways, so account for that + if [ "$plug_iface" = "$CONSUMER_SNAP:browser-sandbox" ]; then + slot_iface=":browser-support" + fi + + CONNECTED_PATTERN="$slot_iface +.*$CONSUMER_SNAP" + DISCONNECTED_PATTERN="$slot_iface +-" + + # Skip any interfaces that core doesn't ship + if ! 
snap interfaces | grep -E -q "$slot_iface +"; then + echo "$slot_iface not present, skipping" + continue + fi + + if [ "$plug_iface" = "$CONSUMER_SNAP:qualcomm-ipc-router" ] && ( os.query is-trusty || os.query is-xenial || os.query is-core16) ; then + # the qualcomm-ipc-router interface is known not to work on xenial, + # just check that it cannot be connected and move on + snap connect "$plug_iface" "$slot_iface" 2>&1 | MATCH "cannot connect plug on system without qipcrtr socket support" + continue + fi + + if [ "$plug_iface" = "$CONSUMER_SNAP:mount-control" ] && os.query is-trusty ; then + # systemd version is too old, skipping + snap connect "$plug_iface" "$slot_iface" 2>&1 | MATCH "systemd version 204 is too old \\(expected at least 209\\)" + continue + fi + + # The netlink-audit interface adds the `audit_read` capability to the + # AppArmor profile, but that's not supported on some older systems + if [ "$plug_iface" = "$CONSUMER_SNAP:netlink-audit" ] && os.query is-trusty; then + snap connect "$plug_iface" "$slot_iface" 2>&1 | MATCH "cannot connect plug on system without audit_read support" + continue + fi + + echo "When slot $slot_iface is connected" + if snap interfaces | grep -E -q "$DISCONNECTED_PATTERN"; then + if [ "$slot_iface" = ":broadcom-asic-control" ] || [ "$slot_iface" = ":firewall-control" ] || [ "$slot_iface" = ":kubernetes-support" ] || [ "$slot_iface" = ":microstack-support" ] || [ "$slot_iface" = ":openvswitch-support" ] || [ "$slot_iface" = ":ppp" ]; then + # TODO: when the kmod backend no longer fails on missing + # modules, we can remove this + snap connect "$plug_iface" "$slot_iface" || true + else + snap connect "$plug_iface" "$slot_iface" + fi + fi + snap interfaces | MATCH "$CONNECTED_PATTERN" + + echo "Then $plugcmd should succeed" + if tests.session has-session-systemd-and-dbus; then + tests.session -u test exec "$plugcmd" | MATCH PASS + else + # If we cannot run the plug command as the test user, in the + # relative safety of the user session which gets torn down, then + # run the test directly EXCEPT when testing the desktop interface. + # + # The desktop interface causes, at minimum, XDG document portal to + # activate in the root users's session, which is not cleaned up. + # Since that interface will only be used in a real session, leaving + # it out is acceptable. 
+ if [ "$plugcmd" != "${CONSUMER_SNAP}.desktop" ]; then + "$plugcmd" | MATCH PASS + else + echo "skipping $plugcmd on an unsupported system" + fi + fi + + echo "Finally disconnect the interface" + if [ "$DISCONNECT_INTERFACES" == true ] && snap interfaces | grep -E -q "$CONNECTED_PATTERN"; then + if [ "$plug_iface" = "$CONSUMER_SNAP:browser-sandbox" ]; then + snap disconnect "$CONSUMER_SNAP:browser-support" "$slot_iface" + else + snap disconnect "$plug_iface" "$slot_iface" + fi + fi + done + + echo "Removing the consumer snap" + # When DISCONNECT_INTERFACES = false, then all the interfaces are connected and + # are disconnected suring the snap removal + snap remove --purge "$CONSUMER_SNAP" diff -Nru snapd-2.62+23.10/tests/perf/main/interfaces-snap-provided/task.yaml snapd-2.63+23.10/tests/perf/main/interfaces-snap-provided/task.yaml --- snapd-2.62+23.10/tests/perf/main/interfaces-snap-provided/task.yaml 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/main/interfaces-snap-provided/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,58 @@ +summary: Ensure the system handles properly a big number of snap provided connections + +details: | + Install a test snap that plugs as many snap provided interfaces as is + possible and verify the command can run. This will help catch performance + issues in snapd, AppArmor,seccomp policy parsing, etc. + +environment: + CONSUMER_SNAP: test-snapd-policy-app-consumer + +execute: | + PROVIDER_SNAP="test-snapd-policy-app-provider-classic" + if os.query is-core; then + PROVIDER_SNAP="test-snapd-policy-app-provider-core" + fi + + echo "Given a snap is installed" + "$TESTSTOOLS"/snaps-state install-local "$PROVIDER_SNAP" + "$TESTSTOOLS"/snaps-state install-local "$CONSUMER_SNAP" + + echo "For each snap-provided slot from $PROVIDER_SNAP" + SNAP_MOUNT_DIR="$(os.paths snap-mount-dir)" + for slotcmd in "$SNAP_MOUNT_DIR"/bin/"$PROVIDER_SNAP".* ; do + slotcmd_bn=$(basename "$slotcmd") + slot_iface=$(echo "$slotcmd_bn" | tr '.' ':') + + #shellcheck disable=SC2001 + plugcmd=$(echo "$slotcmd" | sed "s/$PROVIDER_SNAP/$CONSUMER_SNAP/") + plugcmd_bn=$(basename "$plugcmd") + plug_iface=$(echo "$plugcmd_bn" | tr '.' 
':') + + CONNECTED_PATTERN="$slot_iface +$CONSUMER_SNAP" + + echo "When slot $slot_iface is connected" + snap connect "$plug_iface" "$slot_iface" + snap interfaces | MATCH "$CONNECTED_PATTERN" + + echo "Then $slotcmd should succeed" + "$slotcmd" | MATCH PASS + + echo "Then $plugcmd should succeed" + "$plugcmd" | MATCH PASS + + echo "Finally disconnect the interface" + if [ "$DISCONNECT_INTERFACES" == true ]; then + snap disconnect "$plug_iface" "$slot_iface" + fi + done + + echo "Removing the consumer snap" + # When DISCONNECT_INTERFACES = false, then all the interfaces are connected and + # are disconnected suring the snap removal + PROVIDER_SNAP="test-snapd-policy-app-provider-classic" + if os.query is-core; then + PROVIDER_SNAP="test-snapd-policy-app-provider-core" + fi + snap remove --purge "$PROVIDER_SNAP" + snap remove --purge "$CONSUMER_SNAP" diff -Nru snapd-2.62+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-classic/bin/run snapd-2.63+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-classic/bin/run --- snapd-2.62+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-classic/bin/run 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-classic/bin/run 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +if [ -z "$1" ]; then + echo "PASS" + exit 0 +fi + +# Also allow calling the shell directly with options +/bin/bash --norc "$@" diff -Nru snapd-2.62+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-classic/meta/snap.yaml snapd-2.63+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-classic/meta/snap.yaml --- snapd-2.62+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-classic/meta/snap.yaml 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-classic/meta/snap.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,102 @@ +name: test-snapd-policy-app-provider-classic +version: 1.0 +summary: Test policy app for providing slot interface policy from snapd +description: Test policy app for non-implicitOnClassic slots +confinement: strict + +slots: + content-read: + interface: content + content: test-content + read: + - $SNAP/content + dbus-session: + interface: dbus + bus: session + name: test.session + dbus-system: + interface: dbus + bus: system + name: test.system + location-control: null + location-observe: null + lxd: null + maliit: null + media-hub: null + microceph: null + microovn: null + mir: null + mpris: + name: test-policy-app-provider-classic + online-accounts-service: null + storage-framework-service: null + thumbnailer-service: null + ubuntu-download-manager: null + unity8: null + unity8-calendar: null + unity8-contacts: null + +apps: + content-read: + command: bin/run + slots: [ content-read ] + dbus-session: + command: bin/run + slots: [ dbus-session ] + dbus-system: + command: bin/run + slots: [ dbus-system ] + docker: + command: bin/run + slots: [ docker ] + fwupd: + command: bin/run + slots: [ fwupd ] + location-control: + command: bin/run + slots: [ location-control ] + location-observe: + command: bin/run + slots: [ location-observe ] + lxd: + command: bin/run + slots: [ lxd ] + maliit: + command: bin/run + slots: [ maliit ] + media-hub: + command: bin/run + slots: [ media-hub ] + microceph: + command: bin/run + slots: [ microceph ] + 
microovn: + command: bin/run + slots: [ microovn ] + mir: + command: bin/run + slots: [ mir ] + mpris: + command: bin/run + slots: [ mpris ] + online-accounts-service: + command: bin/run + slots: [ online-accounts-service ] + storage-framework-service: + command: bin/run + slots: [ storage-framework-service ] + thumbnailer-service: + command: bin/run + slots: [ thumbnailer-service ] + ubuntu-download-manager: + command: bin/run + slots: [ ubuntu-download-manager ] + unity8: + command: bin/run + slots: [ unity8 ] + unity8-calendar: + command: bin/run + slots: [ unity8-calendar ] + unity8-contacts: + command: bin/run + slots: [ unity8-contacts ] diff -Nru snapd-2.62+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-core/bin/run snapd-2.63+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-core/bin/run --- snapd-2.62+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-core/bin/run 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-core/bin/run 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +if [ -z "$1" ]; then + echo "PASS" + exit 0 +fi + +# Also allow calling the shell directly with options +/bin/bash --norc "$@" diff -Nru snapd-2.62+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-core/meta/snap.yaml snapd-2.63+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-core/meta/snap.yaml --- snapd-2.62+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-core/meta/snap.yaml 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/main/interfaces-snap-provided/test-snapd-policy-app-provider-core/meta/snap.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,164 @@ +name: test-snapd-policy-app-provider-core +version: 1.0 +summary: Test policy app for providing slot interface policy from snapd +description: Test policy app for non-implicitOnCore slots +confinement: strict + +slots: + audio-playback: null + audio-record: null + avahi-control: null + avahi-observe: null + bluez: null + content-read: + interface: content + content: test-content + read: + - $SNAP/content + cups: null + cups-control: null + dbus-session: + interface: dbus + bus: session + name: test.session + dbus-system: + interface: dbus + bus: system + name: test.system + desktop: null + docker: null + fwupd: null + location-control: null + location-observe: null + lxd: null + maliit: null + media-hub: null + microceph: null + microovn: null + mir: null + modem-manager: null + network-manager: null + network-manager-observe: null + ofono: null + online-accounts-service: null + pulseaudio: null + storage-framework-service: null + thumbnailer-service: null + ubuntu-download-manager: null + udisks2: null + unity8: null + unity8-calendar: null + unity8-contacts: null + upower-observe: null + wayland: null + x11: null + +apps: + audio-playback: + command: bin/run + slots: [ audio-playback ] + audio-record: + command: bin/run + slots: [ audio-record ] + avahi-control: + command: bin/run + slots: [ avahi-control ] + avahi-observe: + command: bin/run + slots: [ avahi-observe ] + bluez: + command: bin/run + slots: [ bluez ] + content-read: + command: bin/run + slots: [ content-read ] + cups: + command: bin/run + slots: [ cups ] + cups-control: + command: bin/run + slots: [ cups-control ] + dbus-session: + command: bin/run + slots: [ dbus-session ] + dbus-system: + 
command: bin/run + slots: [ dbus-system ] + docker: + command: bin/run + slots: [ docker ] + fwupd: + command: bin/run + slots: [ fwupd ] + location-control: + command: bin/run + slots: [ location-control ] + location-observe: + command: bin/run + slots: [ location-observe ] + lxd: + command: bin/run + slots: [ lxd ] + maliit: + command: bin/run + slots: [ maliit ] + media-hub: + command: bin/run + slots: [ media-hub ] + microceph: + command: bin/run + slots: [ microceph ] + microovn: + command: bin/run + slots: [ microovn ] + mir: + command: bin/run + slots: [ mir ] + modem-manager: + command: bin/run + slots: [ modem-manager ] + network-manager: + command: bin/run + slots: [ network-manager ] + network-manager-observe: + command: bin/run + slots: [ network-manager-observe ] + ofono: + command: bin/run + slots: [ ofono ] + online-accounts-service: + command: bin/run + slots: [ online-accounts-service ] + pulseaudio: + command: bin/run + slots: [ pulseaudio ] + storage-framework-service: + command: bin/run + slots: [ storage-framework-service ] + thumbnailer-service: + command: bin/run + slots: [ thumbnailer-service ] + ubuntu-download-manager: + command: bin/run + slots: [ ubuntu-download-manager ] + udisks2: + command: bin/run + slots: [ udisks2 ] + unity8: + command: bin/run + slots: [ unity8 ] + unity8-calendar: + command: bin/run + slots: [ unity8-calendar ] + unity8-contacts: + command: bin/run + slots: [ unity8-contacts ] + upower-observe: + command: bin/run + slots: [ upower-observe ] + wayland: + command: bin/run + slots: [ wayland ] + x11: + command: bin/run + slots: [ x11 ] diff -Nru snapd-2.62+23.10/tests/perf/main/parallel-installs/task.yaml snapd-2.63+23.10/tests/perf/main/parallel-installs/task.yaml --- snapd-2.62+23.10/tests/perf/main/parallel-installs/task.yaml 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/main/parallel-installs/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,37 @@ +summary: Ensure the system handles properly a big number of installed snaps + +details: | + Install a test snap many times using parallel installs and also plugs its + interfaces. This will help catch performance issues in snapd, AppArmor, etc. + +environment: + SNAPS: jq snap-store test-snapd-tools + +prepare: | + snap set system experimental.parallel-instances=true + +restore: | + snap set system experimental.parallel-instances=null + +execute: | + if [ -z "$NUM_PARALLEL" ]; then + NUM_PARALLEL=20 + fi + + # shellcheck disable=SC2086 + for snap in $SNAPS; do + for num in $(seq "$NUM_PARALLEL"); do + snap install "$snap" "${snap}_${num}" + done + done + + NUM_SNAPS="$(echo "$SNAPS" | wc -w )" + test "$(snap list | wc -l)" -gt $(( NUM_PARALLEL * NUM_SNAPS)) + + echo "Removing all the snaps" + # shellcheck disable=SC2086 + for snap in $SNAPS; do + for num in $(seq "$NUM_PARALLEL"); do + snap remove "${snap}_${num}" + done + done diff -Nru snapd-2.62+23.10/tests/perf/nested/install-many-snaps/task.yaml snapd-2.63+23.10/tests/perf/nested/install-many-snaps/task.yaml --- snapd-2.62+23.10/tests/perf/nested/install-many-snaps/task.yaml 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/nested/install-many-snaps/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,29 @@ +summary: Ensure that commands run when their core provided interfaces are connected + +details: | + Install a test snap that plugs as many core provided interfaces as is + possible and verify the command can run (ie, don't test the interface + functionality itself). 
This will help catch things like AppArmor + policy syntax errors, seccomp policy parsing, udev querying bugs, etc. + +systems: [ubuntu-18*, ubuntu-2*] + +environment: + TEST: install-many-snaps + NUM_SNAPS: 100 + +execute: | + # Get the nested system to use + NESTED_SPREAD_SYSTEM="$(tests.nested nested-system)" + + # Get spread + SPREAD="$(tests.nested download spread)" + + # Run spread test + export SPREAD_EXTERNAL_ADDRESS=localhost:8022 + export PERF_NUM_SNAPS="$NUM_SNAPS" + "$SPREAD" external:"$NESTED_SPREAD_SYSTEM":tests/perf/main/"$TEST" |& tee spread.log + + #shellcheck source=tests/lib/nested.sh + . "$TESTSLIB/nested.sh" + nested_check_spread_results spread.log diff -Nru snapd-2.62+23.10/tests/perf/nested/install-many-snaps-no-wait/task.yaml snapd-2.63+23.10/tests/perf/nested/install-many-snaps-no-wait/task.yaml --- snapd-2.62+23.10/tests/perf/nested/install-many-snaps-no-wait/task.yaml 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/nested/install-many-snaps-no-wait/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,29 @@ +summary: Ensure the system handles properly a installing many snaps with --no-wait parameter + +details: | + Install different snaps many times based on arch availability. In this + scenario all the snaps are installed in parallel and the test checks + the system can handle all the installs and remains running. + + +systems: [ubuntu-18*, ubuntu-2*] + +environment: + TEST: install-many-snaps-no-wait + NUM_SNAPS: 35 + +execute: | + # Get the nested system to use + NESTED_SPREAD_SYSTEM="$(tests.nested nested-system)" + + # Get spread + SPREAD="$(tests.nested download spread)" + + # Run spread test + export SPREAD_EXTERNAL_ADDRESS=localhost:8022 + export PERF_NUM_SNAPS="$NUM_SNAPS" + "$SPREAD" external:"$NESTED_SPREAD_SYSTEM":tests/perf/main/"$TEST" |& tee spread.log + + #shellcheck source=tests/lib/nested.sh + . "$TESTSLIB/nested.sh" + nested_check_spread_results spread.log diff -Nru snapd-2.62+23.10/tests/perf/nested/interfaces-many/task.yaml snapd-2.63+23.10/tests/perf/nested/interfaces-many/task.yaml --- snapd-2.62+23.10/tests/perf/nested/interfaces-many/task.yaml 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/nested/interfaces-many/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,56 @@ +summary: Ensure that commands run when their core provided interfaces are connected + +details: | + Install a test snap that plugs as many core provided interfaces as is + possible and verify the command can run (ie, don't test the interface + functionality itself). This will help catch things like AppArmor + policy syntax errors, seccomp policy parsing, udev querying bugs, etc. 
+ +systems: [ubuntu-18*, ubuntu-2*] + +environment: + TEST/100_core_connected: interfaces-core-provided + CONNECTIONS_PERCENTAGE/100_core_connected: 100 + DISCONNECT_INTERFACES/100_core_connected: false + NESTED_CPUS/100_core_connected: 1 + NESTED_MEM/100_core_connected: 768 + CPU_LOAD/100_core_connected: .5 + + TEST/100_core_disconnected: interfaces-core-provided + CONNECTIONS_PERCENTAGE/100_core_disconnected: 100 + DISCONNECT_INTERFACES/100_core_disconnected: true + NESTED_CPUS/100_core_disconnected: 1 + NESTED_MEM/100_core_disconnected: 512 + CPU_LOAD/100_core_disconnected: .6 + + TEST/100_snap_connected: interfaces-snap-provided + CONNECTIONS_PERCENTAGE/100_snap_connected: 100 + DISCONNECT_INTERFACES/100_snap_connected: false + NESTED_CPUS/100_snap_connected: 1 + NESTED_MEM/100_snap_connected: 768 + CPU_LOAD/100_snap_connected: .5 + + TEST/100_snap_disconnected: interfaces-snap-provided + CONNECTIONS_PERCENTAGE/100_snap_disconnected: 100 + DISCONNECT_INTERFACES/100_snap_disconnected: true + NESTED_CPUS/100_snap_disconnected: 1 + NESTED_MEM/100_snap_disconnected: 512 + CPU_LOAD/100_snap_disconnected: .6 + +execute: | + # Get the nested system to use + NESTED_SPREAD_SYSTEM="$(tests.nested nested-system)" + + # Get spread + SPREAD="$(tests.nested download spread)" + + # Run sprad test + export PERF_CPU_LOAD="$CPU_LOAD" + export SPREAD_EXTERNAL_ADDRESS=localhost:8022 + export PERF_CONNECTIONS_PERCENTAGE="$CONNECTIONS_PERCENTAGE" + export PERF_DISCONNECT_INTERFACES="$DISCONNECT_INTERFACES" + "$SPREAD" external:"$NESTED_SPREAD_SYSTEM":tests/perf/main/"$TEST" |& tee spread.log + + #shellcheck source=tests/lib/nested.sh + . "$TESTSLIB/nested.sh" + nested_check_spread_results spread.log diff -Nru snapd-2.62+23.10/tests/perf/nested/parallel-installs/task.yaml snapd-2.63+23.10/tests/perf/nested/parallel-installs/task.yaml --- snapd-2.62+23.10/tests/perf/nested/parallel-installs/task.yaml 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/nested/parallel-installs/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,39 @@ +summary: Ensure that commands run when their core provided interfaces are connected + +details: | + Install a test snap that plugs as many core provided interfaces as is + possible and verify the command can run (ie, don't test the interface + functionality itself). This will help catch things like AppArmor + policy syntax errors, seccomp policy parsing, udev querying bugs, etc. + +systems: [ubuntu-18*, ubuntu-2*] + +environment: + TEST: parallel-installs + + NUM_PARALLEL/20_installs: 20 + NESTED_CPUS/20_installs: 1 + NESTED_MEM/20_installs: 768 + CPU_LOAD/20_installs: .6 + + NUM_PARALLEL/30_installs: 30 + NESTED_CPUS/30_installs: 1 + NESTED_MEM/30_installs: 1024 + CPU_LOAD/30_installs: .5 + +execute: | + # Get the nested system to use + NESTED_SPREAD_SYSTEM="$(tests.nested nested-system)" + + # Get spread + SPREAD="$(tests.nested download spread)" + + # Run sprad test + export PERF_CPU_LOAD="$CPU_LOAD" + export SPREAD_EXTERNAL_ADDRESS=localhost:8022 + export PERF_NUM_PARALLEL="$NUM_PARALLEL" + "$SPREAD" external:"$NESTED_SPREAD_SYSTEM":tests/perf/main/"$TEST" |& tee spread.log + + #shellcheck source=tests/lib/nested.sh + . 
"$TESTSLIB/nested.sh" + nested_check_spread_results spread.log diff -Nru snapd-2.62+23.10/tests/perf/parallel-installs/task.yaml snapd-2.63+23.10/tests/perf/parallel-installs/task.yaml --- snapd-2.62+23.10/tests/perf/parallel-installs/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/perf/parallel-installs/task.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -summary: Ensure the system handles properly a big number of installed snaps - -details: | - Install a test snap many times using parallel installs and also plugs its - interfaces. This will help catch performance issues in snapd, AppArmor, etc. - -environment: - SNAPS: jq snap-store test-snapd-tools - -prepare: | - snap set system experimental.parallel-instances=true - -restore: | - snap set system experimental.parallel-instances=null - -execute: | - if [ -z "$NUM_PARALLEL" ]; then - NUM_PARALLEL=20 - fi - - # shellcheck disable=SC2086 - for snap in $SNAPS; do - for num in $(seq "$NUM_PARALLEL"); do - snap install "$snap" "${snap}_${num}" - done - done - - NUM_SNAPS="$(echo "$SNAPS" | wc -w )" - test "$(snap list | wc -l)" -gt $(( NUM_PARALLEL * NUM_SNAPS)) - - echo "Removing all the snaps" - # shellcheck disable=SC2086 - for snap in $SNAPS; do - for num in $(seq "$NUM_PARALLEL"); do - snap remove "${snap}_${num}" - done - done diff -Nru snapd-2.62+23.10/tests/regression/lp-1813365/task.yaml snapd-2.63+23.10/tests/regression/lp-1813365/task.yaml --- snapd-2.62+23.10/tests/regression/lp-1813365/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/regression/lp-1813365/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -1,14 +1,25 @@ summary: Regression check for https://bugs.launchpad.net/snapd/+bug/1813365 +details: | + Check security issue related to local privilege escalation via snapd socket + systems: [ubuntu-1*, ubuntu-2*, ubuntu-core-*, debian-*] prepare: | - mount --bind logger "$(command -v adduser)" + if os.query is-core-ge 24; then + mount --bind logger "$(command -v useradd)" + else + mount --bind logger "$(command -v adduser)" + fi mount --bind logger "$(command -v passwd)" mount --bind logger "$(command -v usermod)" restore: | - umount "$(command -v adduser)" + if os.query is-core-ge 24; then + umount "$(command -v useradd)" + else + umount "$(command -v adduser)" + fi umount "$(command -v passwd)" umount "$(command -v usermod)" rm -f /tmp/logger.log diff -Nru snapd-2.62+23.10/tests/regression/lp-1848567/task.yaml snapd-2.63+23.10/tests/regression/lp-1848567/task.yaml --- snapd-2.62+23.10/tests/regression/lp-1848567/task.yaml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/tests/regression/lp-1848567/task.yaml 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,8 @@ snap connect test-snapd-app:sound-themes test-snapd-gtk-common-themes:sound-themes execute: | - if snap debug sandbox-features --required apparmor:kernel:mount; then + # When using the internal parser, the profile may include features which are not yet supported by the host parser. + if snap debug sandbox-features --required apparmor:kernel:mount && ! 
snap debug sandbox-features --required apparmor:parser:snapd-internal; then # Re-compile the apparmor profile for snap-update-ns for the # test-snapd-app snap while ensuring that the profile is not loaded # into kernel memory and that the compiler is not using any existing diff -Nru snapd-2.62+23.10/testutil/exec.go snapd-2.63+23.10/testutil/exec.go --- snapd-2.62+23.10/testutil/exec.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/testutil/exec.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "bytes" "fmt" "io" - "io/ioutil" "os" "os/exec" "path" @@ -193,7 +192,7 @@ // {"cmd", "arg1", "arg2"}, // second invocation of "cmd" // } func (cmd *MockCmd) Calls() [][]string { - raw, err := ioutil.ReadFile(cmd.logFile) + raw, err := os.ReadFile(cmd.logFile) if os.IsNotExist(err) { return nil } diff -Nru snapd-2.62+23.10/testutil/exec_test.go snapd-2.63+23.10/testutil/exec_test.go --- snapd-2.62+23.10/testutil/exec_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/testutil/exec_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -90,11 +89,11 @@ {"shellcheck", "-s", "bash", "-"}, }) - scriptData, err := ioutil.ReadFile(mock.Exe()) + scriptData, err := os.ReadFile(mock.Exe()) c.Assert(err, check.IsNil) c.Assert(string(scriptData), Contains, "\necho some-command\n") - data, err := ioutil.ReadFile(filepath.Join(tmpDir, "input")) + data, err := os.ReadFile(filepath.Join(tmpDir, "input")) c.Assert(err, check.IsNil) c.Assert(data, check.DeepEquals, scriptData) } diff -Nru snapd-2.62+23.10/testutil/filecontentchecker.go snapd-2.63+23.10/testutil/filecontentchecker.go --- snapd-2.62+23.10/testutil/filecontentchecker.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/testutil/filecontentchecker.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,7 @@ import ( "bytes" "fmt" - "io/ioutil" + "os" "regexp" "strings" @@ -77,7 +77,7 @@ } func fileContentCheck(filename string, content interface{}, exact bool) (result bool, error string) { - buf, err := ioutil.ReadFile(filename) + buf, err := os.ReadFile(filename) if err != nil { return false, fmt.Sprintf("Cannot read file %q: %v", filename, err) } @@ -93,7 +93,7 @@ result = presentableBuf == content.String() case FileContentRef: referenceFilename := string(content) - reference, err := ioutil.ReadFile(referenceFilename) + reference, err := os.ReadFile(referenceFilename) if err != nil { return false, fmt.Sprintf("Cannot read reference file %q: %v", referenceFilename, err) } diff -Nru snapd-2.62+23.10/testutil/lowlevel.go snapd-2.63+23.10/testutil/lowlevel.go --- snapd-2.62+23.10/testutil/lowlevel.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/testutil/lowlevel.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,6 +21,7 @@ import ( "fmt" + "io/fs" "os" "strings" "syscall" @@ -55,6 +56,27 @@ return &fakeFileInfo{name: name, mode: mode} } +// fakeDirEntry implements fs.DirEntry for testing. 
+type fakeDirEntry struct { + name string + mode os.FileMode +} + +func (de *fakeDirEntry) Name() string { return de.name } +func (de *fakeDirEntry) Type() fs.FileMode { return de.mode.Type() } +func (de *fakeDirEntry) IsDir() bool { return de.Type().IsDir() } +func (de *fakeDirEntry) Info() (fs.FileInfo, error) { + return &fakeFileInfo{ + name: de.Name(), + mode: de.mode, + }, nil +} + +// FakeDirEntry returns a fake object implementing fs.DirEntry +func FakeDirEntry(name string, mode os.FileMode) fs.DirEntry { + return &fakeDirEntry{name: name, mode: mode} +} + // Convenient FakeFileInfo objects for InsertLstatResult var ( FileInfoFile = &fakeFileInfo{} @@ -160,7 +182,7 @@ sysLstats map[string]syscall.Stat_t fstats map[string]syscall.Stat_t fstatfses map[string]func() syscall.Statfs_t - readdirs map[string][]os.FileInfo + readdirs map[string][]fs.DirEntry readlinkats map[string]string // allocated file descriptors fds map[int]string @@ -467,15 +489,15 @@ } // InsertReadDirResult makes given subsequent call readdir return the specified fake file infos. -func (sys *SyscallRecorder) InsertReadDirResult(call string, infos []os.FileInfo) { +func (sys *SyscallRecorder) InsertReadDirResult(call string, infos []fs.DirEntry) { if sys.readdirs == nil { - sys.readdirs = make(map[string][]os.FileInfo) + sys.readdirs = make(map[string][]fs.DirEntry) } sys.readdirs[call] = infos } // ReadDir is a fake implementation of os.ReadDir -func (sys *SyscallRecorder) ReadDir(dirname string) ([]os.FileInfo, error) { +func (sys *SyscallRecorder) ReadDir(dirname string) ([]fs.DirEntry, error) { call := fmt.Sprintf("readdir %q", dirname) val, err := sys.rcall(call, func(call string) (interface{}, error) { if fi, ok := sys.readdirs[call]; ok { @@ -484,7 +506,7 @@ panic(fmt.Sprintf("one of InsertReadDirResult() or InsertFault() for %s must be used", call)) }) if err == nil { - return val.([]os.FileInfo), nil + return val.([]fs.DirEntry), nil } return nil, err } diff -Nru snapd-2.62+23.10/testutil/lowlevel_test.go snapd-2.63+23.10/testutil/lowlevel_test.go --- snapd-2.62+23.10/testutil/lowlevel_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/testutil/lowlevel_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,6 +21,7 @@ import ( "fmt" + "io/fs" "os" "syscall" @@ -40,9 +41,11 @@ } func (s *lowLevelSuite) TestFakeFileInfo(c *check.C) { - ffi := testutil.FakeFileInfo("name", 0755) + ffi := testutil.FakeDirEntry("name", 0755) c.Assert(ffi.Name(), check.Equals, "name") - c.Assert(ffi.Mode(), check.Equals, os.FileMode(0755)) + fi, err := ffi.Info() + c.Assert(err, check.IsNil) + c.Assert(fi.Mode().Perm(), check.Equals, os.FileMode(0755)) c.Assert(testutil.FileInfoFile.Mode().IsDir(), check.Equals, false) c.Assert(testutil.FileInfoFile.Mode().IsRegular(), check.Equals, true) @@ -625,9 +628,9 @@ } func (s *lowLevelSuite) TestReadDirSuccess(c *check.C) { - files := []os.FileInfo{ - testutil.FakeFileInfo("file", 0644), - testutil.FakeFileInfo("dir", 0755|os.ModeDir), + files := []fs.DirEntry{ + testutil.FakeDirEntry("file", 0644), + testutil.FakeDirEntry("dir", 0755|os.ModeDir), } s.sys.InsertReadDirResult(`readdir "/foo"`, files) files, err := s.sys.ReadDir("/foo") diff -Nru snapd-2.62+23.10/timeutil/synchronized_test.go snapd-2.63+23.10/timeutil/synchronized_test.go --- snapd-2.62+23.10/timeutil/synchronized_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/timeutil/synchronized_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,6 +22,7 @@ import ( "errors" "fmt" + "sync" 
"github.com/godbus/dbus" . "gopkg.in/check.v1" @@ -41,6 +42,7 @@ NTPSynchronized bool + m sync.Mutex getPropertyCalled []string } @@ -69,7 +71,7 @@ } func (server *mockTimedate1) Export() { - server.conn.Export(timedate1Api{server}, timedate1ObjectPath, "org.freedesktop.DBus.Properties") + server.conn.Export(&timedate1Api{server}, timedate1ObjectPath, "org.freedesktop.DBus.Properties") } func (server *mockTimedate1) Stop() error { @@ -79,11 +81,22 @@ return server.conn.Close() } +func (server *mockTimedate1) reset(ntpSynchronized bool) { + server.m.Lock() + defer server.m.Unlock() + + server.NTPSynchronized = ntpSynchronized + server.getPropertyCalled = nil +} + type timedate1Api struct { server *mockTimedate1 } -func (a timedate1Api) Get(iff, prop string) (dbus.Variant, *dbus.Error) { +func (a *timedate1Api) Get(iff, prop string) (dbus.Variant, *dbus.Error) { + a.server.m.Lock() + defer a.server.m.Unlock() + a.server.getPropertyCalled = append(a.server.getPropertyCalled, fmt.Sprintf("if=%s;prop=%s", iff, prop)) return dbus.MakeVariant(a.server.NTPSynchronized), nil } @@ -115,16 +128,19 @@ backend.Export() for _, v := range []bool{true, false} { - backend.getPropertyCalled = nil - backend.NTPSynchronized = v + backend.reset(v) synced, err := timeutil.IsNTPSynchronized() c.Assert(err, IsNil) c.Check(synced, Equals, v) - c.Check(backend.getPropertyCalled, DeepEquals, []string{ - "if=org.freedesktop.timedate1;prop=NTPSynchronized", - }) + func() { + backend.m.Lock() + defer backend.m.Unlock() + c.Check(backend.getPropertyCalled, DeepEquals, []string{ + "if=org.freedesktop.timedate1;prop=NTPSynchronized", + }) + }() } } diff -Nru snapd-2.62+23.10/usersession/agent/rest_api.go snapd-2.63+23.10/usersession/agent/rest_api.go --- snapd-2.62+23.10/usersession/agent/rest_api.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/usersession/agent/rest_api.go 2024-04-24 00:00:39.000000000 +0000 @@ -88,13 +88,7 @@ } return SyncResponse(m) } - -type serviceInstruction struct { - Action string `json:"action"` - Services []string `json:"services"` -} - -func serviceStart(inst *serviceInstruction, sysd systemd.Systemd) Response { +func serviceStart(inst *client.ServiceInstruction, sysd systemd.Systemd) Response { // Refuse to start non-snap services for _, service := range inst.Services { if !strings.HasPrefix(service, "snap.") { @@ -137,7 +131,7 @@ }) } -func serviceRestart(inst *serviceInstruction, sysd systemd.Systemd) Response { +func serviceRestart(inst *client.ServiceInstruction, sysd systemd.Systemd) Response { // Refuse to restart non-snap services for _, service := range inst.Services { if !strings.HasPrefix(service, "snap.") { @@ -147,8 +141,14 @@ restartErrors := make(map[string]string) for _, service := range inst.Services { - if err := sysd.Restart([]string{service}); err != nil { - restartErrors[service] = err.Error() + if inst.Reload { + if err := sysd.ReloadOrRestart([]string{service}); err != nil { + restartErrors[service] = err.Error() + } + } else { + if err := sysd.Restart([]string{service}); err != nil { + restartErrors[service] = err.Error() + } } } if len(restartErrors) == 0 { @@ -167,37 +167,7 @@ }) } -func serviceReloadOrRestart(inst *serviceInstruction, sysd systemd.Systemd) Response { - // Refuse to reload/restart non-snap services - for _, service := range inst.Services { - if !strings.HasPrefix(service, "snap.") { - return InternalError("cannot restart non-snap service %v", service) - } - } - - restartErrors := make(map[string]string) - for _, service := range 
inst.Services { - if err := sysd.ReloadOrRestart([]string{service}); err != nil { - restartErrors[service] = err.Error() - } - } - if len(restartErrors) == 0 { - return SyncResponse(nil) - } - return SyncResponse(&resp{ - Type: ResponseTypeError, - Status: 500, - Result: &errorResult{ - Message: "some user services failed to restart or reload", - Kind: errorKindServiceControl, - Value: map[string]interface{}{ - "restart-errors": restartErrors, - }, - }, - }) -} - -func serviceStop(inst *serviceInstruction, sysd systemd.Systemd) Response { +func serviceStop(inst *client.ServiceInstruction, sysd systemd.Systemd) Response { // Refuse to stop non-snap services for _, service := range inst.Services { if !strings.HasPrefix(service, "snap.") { @@ -227,7 +197,7 @@ }) } -func serviceDaemonReload(inst *serviceInstruction, sysd systemd.Systemd) Response { +func serviceDaemonReload(inst *client.ServiceInstruction, sysd systemd.Systemd) Response { if len(inst.Services) != 0 { return InternalError("daemon-reload should not be called with any services") } @@ -237,12 +207,11 @@ return SyncResponse(nil) } -var serviceInstructionDispTable = map[string]func(*serviceInstruction, systemd.Systemd) Response{ - "start": serviceStart, - "stop": serviceStop, - "restart": serviceRestart, - "reload-or-restart": serviceReloadOrRestart, - "daemon-reload": serviceDaemonReload, +var serviceInstructionDispTable = map[string]func(*client.ServiceInstruction, systemd.Systemd) Response{ + "start": serviceStart, + "stop": serviceStop, + "restart": serviceRestart, + "daemon-reload": serviceDaemonReload, } var systemdLock sync.Mutex @@ -276,7 +245,7 @@ } decoder := json.NewDecoder(r.Body) - var inst serviceInstruction + var inst client.ServiceInstruction if err := decoder.Decode(&inst); err != nil { return BadRequest("cannot decode request body into service instruction: %v", err) } diff -Nru snapd-2.62+23.10/usersession/agent/rest_api_test.go snapd-2.63+23.10/usersession/agent/rest_api_test.go --- snapd-2.62+23.10/usersession/agent/rest_api_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/usersession/agent/rest_api_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -448,7 +448,7 @@ } func (s *restSuite) TestServicesRestartOrReload(c *C) { - req := httptest.NewRequest("POST", "/v1/service-control", bytes.NewBufferString(`{"action":"reload-or-restart","services":["snap.foo.service", "snap.bar.service"]}`)) + req := httptest.NewRequest("POST", "/v1/service-control", bytes.NewBufferString(`{"action":"restart", "reload":true,"services":["snap.foo.service", "snap.bar.service"]}`)) req.Header.Set("Content-Type", "application/json") rec := httptest.NewRecorder() agent.ServiceControlCmd.POST(agent.ServiceControlCmd, req).ServeHTTP(rec, req) @@ -467,7 +467,7 @@ } func (s *restSuite) TestServicesRestartOrReloadNonSnap(c *C) { - req := httptest.NewRequest("POST", "/v1/service-control", bytes.NewBufferString(`{"action":"reload-or-restart","services":["snap.foo.service", "not-snap.bar.service"]}`)) + req := httptest.NewRequest("POST", "/v1/service-control", bytes.NewBufferString(`{"action":"restart", "reload":true,"services":["snap.foo.service", "not-snap.bar.service"]}`)) req.Header.Set("Content-Type", "application/json") rec := httptest.NewRecorder() agent.ServiceControlCmd.POST(agent.ServiceControlCmd, req).ServeHTTP(rec, req) @@ -499,7 +499,7 @@ }) defer restore() - req := httptest.NewRequest("POST", "/v1/service-control", bytes.NewBufferString(`{"action":"reload-or-restart","services":["snap.foo.service", 
"snap.bar.service"]}`)) + req := httptest.NewRequest("POST", "/v1/service-control", bytes.NewBufferString(`{"action":"restart", "reload":true,"services":["snap.foo.service", "snap.bar.service"]}`)) req.Header.Set("Content-Type", "application/json") rec := httptest.NewRecorder() agent.ServiceControlCmd.POST(agent.ServiceControlCmd, req).ServeHTTP(rec, req) @@ -511,7 +511,7 @@ c.Check(rsp.Type, Equals, agent.ResponseTypeError) c.Check(rsp.Result, DeepEquals, map[string]interface{}{ "kind": "service-control", - "message": "some user services failed to restart or reload", + "message": "some user services failed to restart", "value": map[string]interface{}{ "restart-errors": map[string]interface{}{ "snap.bar.service": "mock systemctl error", diff -Nru snapd-2.62+23.10/usersession/client/client.go snapd-2.63+23.10/usersession/client/client.go --- snapd-2.62+23.10/usersession/client/client.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/usersession/client/client.go 2024-04-24 00:00:39.000000000 +0000 @@ -25,7 +25,6 @@ "encoding/json" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -36,6 +35,7 @@ "time" "github.com/snapcore/snapd/dirs" + "github.com/snapcore/snapd/systemd" ) // dialSessionAgent connects to a user's session agent @@ -53,7 +53,7 @@ type Client struct { doer *http.Client - uids []int + uids map[int]bool } func New() *Client { @@ -67,7 +67,10 @@ // only. func NewForUids(uids ...int) *Client { cli := New() - cli.uids = append(cli.uids, uids...) + cli.uids = make(map[int]bool, len(uids)) + for _, uid := range uids { + cli.uids[uid] = true + } return cli } @@ -104,20 +107,12 @@ } } -func (client *Client) sendRequest(ctx context.Context, socket string, method, urlpath string, query url.Values, headers map[string]string, body []byte) *response { - uidStr := filepath.Base(filepath.Dir(socket)) - uid, err := strconv.Atoi(uidStr) - if err != nil { - // Ignore directories that do not - // appear to be valid XDG runtime dirs - // (i.e. /run/user/NNNN). - return nil - } +func (client *Client) sendRequest(ctx context.Context, uid int, method, urlpath string, query url.Values, headers map[string]string, body []byte) *response { response := &response{uid: uid} u := url.URL{ Scheme: "http", - Host: uidStr, + Host: fmt.Sprintf("%d", uid), Path: urlpath, RawQuery: query.Encode(), } @@ -142,13 +137,45 @@ return response } +func (client *Client) uidIsValidAsTarget(uid int) bool { + // if uids are provided (i.e there must be entries, otherwise + // no list is there), then there must be an entry + if len(client.uids) > 0 { + return client.uids[uid] + } + return true +} + +func (client *Client) sessionTargets() ([]int, error) { + sockets, err := filepath.Glob(filepath.Join(dirs.XdgRuntimeDirGlob, "snapd-session-agent.socket")) + if err != nil { + return nil, err + } + + uids := make([]int, 0, len(client.uids)) + for _, sock := range sockets { + uidStr := filepath.Base(filepath.Dir(sock)) + uid, err := strconv.Atoi(uidStr) + if err != nil { + // Ignore directories that do not + // appear to be valid XDG runtime dirs + // (i.e. /run/user/NNNN). + continue + } + if client.uidIsValidAsTarget(uid) { + uids = append(uids, uid) + } + } + return uids, nil +} + // doMany sends the given request to all active user sessions or a subset of them // defined by optional client.uids field. Please be careful when using this // method, because it is not aware of the physical user who triggered the request // and blindly forwards it to all logged in users. 
Some of them might not have // the right to see the request (let alone to respond to it). func (client *Client) doMany(ctx context.Context, method, urlpath string, query url.Values, headers map[string]string, body []byte) ([]*response, error) { - sockets, err := filepath.Glob(filepath.Join(dirs.XdgRuntimeDirGlob, "snapd-session-agent.socket")) + uids, err := client.sessionTargets() if err != nil { return nil, err } @@ -158,35 +185,17 @@ responses []*response ) - var uids map[string]bool - if len(client.uids) > 0 { - uids = make(map[string]bool) - for _, uid := range client.uids { - uids[fmt.Sprintf("%d", uid)] = true - } - } - - for _, socket := range sockets { - // filter out sockets based on uids - if len(uids) > 0 { - // XXX: alternatively we could Stat() the socket and - // and check Uid field of stat.Sys().(*syscall.Stat_t), but it's - // more annyoing to unit-test. - userPart := filepath.Base(filepath.Dir(socket)) - if !uids[userPart] { - continue - } - } + for _, uid := range uids { wg.Add(1) - go func(socket string) { + go func(uid int) { defer wg.Done() - response := client.sendRequest(ctx, socket, method, urlpath, query, headers, body) + response := client.sendRequest(ctx, uid, method, urlpath, query, headers, body) if response != nil { mu.Lock() defer mu.Unlock() responses = append(responses, response) } - }(socket) + }(uid) } wg.Wait() return responses, nil @@ -196,7 +205,7 @@ dec := json.NewDecoder(reader) if err := dec.Decode(v); err != nil { r := dec.Buffered() - buf, err1 := ioutil.ReadAll(r) + buf, err1 := io.ReadAll(r) if err1 != nil { buf = []byte(fmt.Sprintf("error reading buffered response body: %s", err1)) } @@ -265,19 +274,25 @@ return failures, err } -func (client *Client) serviceControlCall(ctx context.Context, action string, services []string) (startFailures, stopFailures []ServiceFailure, err error) { - headers := map[string]string{"Content-Type": "application/json"} - reqBody, err := json.Marshal(map[string]interface{}{ - "action": action, - "services": services, - }) - if err != nil { - return nil, nil, err - } - responses, err := client.doMany(ctx, "POST", "/v1/service-control", nil, headers, reqBody) - if err != nil { - return nil, nil, err - } +// ServiceInstruction is the json representation of possible arguments +// for the user session rest api to control services. Arguments allowed for +// start/stop/restart are all listed here, and closely reflect possible arguments +// for similar options in the wrappers package. 
+type ServiceInstruction struct { + Action string `json:"action"` + Services []string `json:"services,omitempty"` + + // StartServices options + Enable bool `json:"enable,omitempty"` + + // StopServices options + Disable bool `json:"disable,omitempty"` + + // RestartServices options + Reload bool `json:"reload,omitempty"` +} + +func (client *Client) decodeControlResponses(responses []*response) (startFailures, stopFailures []ServiceFailure, err error) { for _, resp := range responses { if agentErr, ok := resp.err.(*Error); ok && agentErr.Kind == "service-control" { if errorValue, ok := agentErr.Value.(map[string]interface{}); ok { @@ -298,27 +313,127 @@ return startFailures, stopFailures, err } +func (client *Client) serviceControlCall(ctx context.Context, inst *ServiceInstruction) (startFailures, stopFailures []ServiceFailure, err error) { + headers := map[string]string{"Content-Type": "application/json"} + reqBody, err := json.Marshal(inst) + if err != nil { + return nil, nil, err + } + responses, err := client.doMany(ctx, "POST", "/v1/service-control", nil, headers, reqBody) + if err != nil { + return nil, nil, err + } + return client.decodeControlResponses(responses) +} + func (client *Client) ServicesDaemonReload(ctx context.Context) error { - _, _, err := client.serviceControlCall(ctx, "daemon-reload", nil) + _, _, err := client.serviceControlCall(ctx, &ServiceInstruction{ + Action: "daemon-reload", + }) return err } -func (client *Client) ServicesStart(ctx context.Context, services []string) (startFailures, stopFailures []ServiceFailure, err error) { - return client.serviceControlCall(ctx, "start", services) +func filterDisabledServices(all, disabled []string) []string { + var filtered []string +ServiceLoop: + for _, svc := range all { + for _, disabledSvc := range disabled { + if strings.Contains(svc, disabledSvc) { + continue ServiceLoop + } + } + filtered = append(filtered, svc) + } + return filtered } -func (client *Client) ServicesStop(ctx context.Context, services []string) (stopFailures []ServiceFailure, err error) { - _, stopFailures, err = client.serviceControlCall(ctx, "stop", services) - return stopFailures, err +type ClientServicesStartOptions struct { + // Enable determines whether the service should be enabled before + // it is started. + Enable bool + // DisabledServices is a list of services per-uid that can be provided + // which will then be ignored for the start or enable operation. + DisabledServices map[int][]string +} + +// ServicesStart attempts to start the services in `services`. +// If the enable flag is provided, then services listed will also be +// enabled. +// If the map of disabled services is set, then on a per-uid basis the services +// listed in `services` can be filtered out. +func (client *Client) ServicesStart(ctx context.Context, services []string, opts ClientServicesStartOptions) (startFailures, stopFailures []ServiceFailure, err error) { + // If no disabled services lists are provided, then we do not need to filter out services + // per-user. In this case a single request covering all user sessions is enough. + if len(opts.DisabledServices) == 0 { + return client.serviceControlCall(ctx, &ServiceInstruction{ + Action: "start", + Services: services, + Enable: opts.Enable, + }) + } + + // Otherwise we do a bit of manual request building based on the uids we need to filter + // services out for.
+ uids, err := client.sessionTargets() + if err != nil { + return nil, nil, err + } + var ( + wg sync.WaitGroup + mu sync.Mutex + responses []*response + ) + + for _, uid := range uids { + headers := map[string]string{"Content-Type": "application/json"} + filtered := filterDisabledServices(services, opts.DisabledServices[uid]) + if len(filtered) == 0 { + // Save an expensive call + continue + } + reqBody, err := json.Marshal(&ServiceInstruction{ + Action: "start", + Services: filtered, + Enable: opts.Enable, + }) + if err != nil { + return nil, nil, err + } + wg.Add(1) + go func(uid int) { + defer wg.Done() + response := client.sendRequest(ctx, uid, "POST", "/v1/service-control", nil, headers, reqBody) + if response != nil { + mu.Lock() + defer mu.Unlock() + responses = append(responses, response) + } + }(uid) + } + wg.Wait() + return client.decodeControlResponses(responses) } -func (client *Client) ServicesRestart(ctx context.Context, services []string) (restartFailures []ServiceFailure, err error) { - restartFailures, _, err = client.serviceControlCall(ctx, "restart", services) - return restartFailures, err +// ServicesStop attempts to stop the services in `services`. +// If the disable flag is set then the services listed also will +// be disabled. +func (client *Client) ServicesStop(ctx context.Context, services []string, disable bool) (stopFailures []ServiceFailure, err error) { + _, stopFailures, err = client.serviceControlCall(ctx, &ServiceInstruction{ + Action: "stop", + Services: services, + Disable: disable, + }) + return stopFailures, err } -func (client *Client) ServicesReloadOrRestart(ctx context.Context, services []string) (restartFailures []ServiceFailure, err error) { - restartFailures, _, err = client.serviceControlCall(ctx, "reload-or-restart", services) +// ServicesRestart attempts to restart or reload active services in `services`. +// If the reload flag is set then "systemctl reload-or-restart" is attempted. 
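Taken together, a hedged sketch of how a caller might drive the reworked client API from these hunks (ServicesStart with ClientServicesStartOptions, ServicesStop with a disable flag, and ServicesRestart with a reload flag, defined just below). The uid 1000, the service names, and the surrounding main function are made up for illustration; error handling is trimmed.

```go
package main

import (
	"context"
	"log"

	"github.com/snapcore/snapd/usersession/client"
)

func main() {
	cli := client.New()
	ctx := context.Background()
	services := []string{"snap.foo.service", "snap.bar.service"}

	// Start (and enable) the services, skipping snap.bar.service for uid 1000 only.
	startFailures, stopFailures, err := cli.ServicesStart(ctx, services,
		client.ClientServicesStartOptions{
			Enable:           true,
			DisabledServices: map[int][]string{1000: {"snap.bar.service"}},
		})
	if err != nil {
		log.Printf("start failed: %v (start failures: %v, stop failures: %v)",
			err, startFailures, stopFailures)
	}

	// Restart with reload semantics ("systemctl reload-or-restart").
	if _, err := cli.ServicesRestart(ctx, services, true); err != nil {
		log.Printf("restart failed: %v", err)
	}

	// Stop and disable the services.
	if _, err := cli.ServicesStop(ctx, services, true); err != nil {
		log.Printf("stop failed: %v", err)
	}
}
```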
+func (client *Client) ServicesRestart(ctx context.Context, services []string, reload bool) (restartFailures []ServiceFailure, err error) { + restartFailures, _, err = client.serviceControlCall(ctx, &ServiceInstruction{ + Action: "restart", + Services: services, + Reload: reload, + }) return restartFailures, err } @@ -334,6 +449,19 @@ NeedDaemonReload bool `json:"needs-reload"` } +func (us *ServiceUnitStatus) SystemdUnitStatus() *systemd.UnitStatus { + return &systemd.UnitStatus{ + Daemon: us.Daemon, + Id: us.Id, + Name: us.Name, + Names: us.Names, + Enabled: us.Enabled, + Active: us.Active, + Installed: us.Installed, + NeedDaemonReload: us.NeedDaemonReload, + } +} + func (client *Client) ServiceStatus(ctx context.Context, services []string) (map[int][]ServiceUnitStatus, map[int][]ServiceFailure, error) { q := make(url.Values) q.Add("services", strings.Join(services, ",")) diff -Nru snapd-2.62+23.10/usersession/client/client_test.go snapd-2.63+23.10/usersession/client/client_test.go --- snapd-2.62+23.10/usersession/client/client_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/usersession/client/client_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,8 +21,9 @@ import ( "context" + "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "os" @@ -234,12 +235,49 @@ "result": null }`)) }) - startFailures, stopFailures, err := s.cli.ServicesStart(context.Background(), []string{"service1.service", "service2.service"}) + startFailures, stopFailures, err := s.cli.ServicesStart(context.Background(), []string{"service1.service", "service2.service"}, client.ClientServicesStartOptions{}) c.Assert(err, IsNil) c.Check(startFailures, HasLen, 0) c.Check(stopFailures, HasLen, 0) } +func (s *clientSuite) TestServicesStartWithDisabledServices(c *C) { + var n int32 + s.handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + atomic.AddInt32(&n, 1) + decoder := json.NewDecoder(r.Body) + var inst client.ServiceInstruction + c.Assert(decoder.Decode(&inst), IsNil) + if r.Host == "42" { + c.Check(inst.Services, DeepEquals, []string{"service2.service"}) + } else if r.Host == "1000" { + c.Check(inst.Services, DeepEquals, []string{"service1.service"}) + } else { + c.FailNow() + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + w.Write([]byte(`{ + "type": "sync", + "result": null +}`)) + }) + startFailures, stopFailures, err := s.cli.ServicesStart( + context.Background(), + []string{"service1.service", "service2.service"}, + client.ClientServicesStartOptions{ + DisabledServices: map[int][]string{ + 42: {"service1.service"}, + 1000: {"service2.service"}, + }, + }, + ) + c.Assert(err, IsNil) + c.Check(startFailures, HasLen, 0) + c.Check(stopFailures, HasLen, 0) + c.Check(atomic.LoadInt32(&n), Equals, int32(2)) +} + func (s *clientSuite) TestServicesStartFailure(c *C) { s.handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") @@ -257,7 +295,7 @@ } }`)) }) - startFailures, stopFailures, err := s.cli.ServicesStart(context.Background(), []string{"service1.service", "service2.service"}) + startFailures, stopFailures, err := s.cli.ServicesStart(context.Background(), []string{"service1.service", "service2.service"}, client.ClientServicesStartOptions{}) c.Assert(err, ErrorMatches, "failed to start services") c.Check(startFailures, HasLen, 2) c.Check(stopFailures, HasLen, 0) @@ -304,7 +342,7 @@ } }`)) }) - startFailures, stopFailures, err := s.cli.ServicesStart(context.Background(), 
[]string{"service1.service", "service2.service"}) + startFailures, stopFailures, err := s.cli.ServicesStart(context.Background(), []string{"service1.service", "service2.service"}, client.ClientServicesStartOptions{}) c.Assert(err, ErrorMatches, "failed to start services") c.Check(startFailures, DeepEquals, []client.ServiceFailure{ { @@ -341,14 +379,14 @@ // Error value is not a map errorValue = "[]" - startFailures, stopFailures, err := s.cli.ServicesStart(context.Background(), []string{"service1.service"}) + startFailures, stopFailures, err := s.cli.ServicesStart(context.Background(), []string{"service1.service"}, client.ClientServicesStartOptions{}) c.Check(err, ErrorMatches, "failed to stop services") c.Check(startFailures, HasLen, 0) c.Check(stopFailures, HasLen, 0) // Error value is a map, but missing start-errors/stop-errors keys errorValue = "{}" - startFailures, stopFailures, err = s.cli.ServicesStart(context.Background(), []string{"service1.service"}) + startFailures, stopFailures, err = s.cli.ServicesStart(context.Background(), []string{"service1.service"}, client.ClientServicesStartOptions{}) c.Check(err, ErrorMatches, "failed to stop services") c.Check(startFailures, HasLen, 0) c.Check(stopFailures, HasLen, 0) @@ -358,7 +396,7 @@ "start-errors": [], "stop-errors": 42 }` - startFailures, stopFailures, err = s.cli.ServicesStart(context.Background(), []string{"service1.service"}) + startFailures, stopFailures, err = s.cli.ServicesStart(context.Background(), []string{"service1.service"}, client.ClientServicesStartOptions{}) c.Check(err, ErrorMatches, "failed to stop services") c.Check(startFailures, HasLen, 0) c.Check(stopFailures, HasLen, 0) @@ -372,7 +410,7 @@ "service1.service": {} } }` - startFailures, stopFailures, err = s.cli.ServicesStart(context.Background(), []string{"service1.service"}) + startFailures, stopFailures, err = s.cli.ServicesStart(context.Background(), []string{"service1.service"}, client.ClientServicesStartOptions{}) c.Check(err, ErrorMatches, "failed to stop services") c.Check(startFailures, HasLen, 0) c.Check(stopFailures, HasLen, 0) @@ -386,7 +424,7 @@ "service2.service": 42 } }` - startFailures, stopFailures, err = s.cli.ServicesStart(context.Background(), []string{"service1.service"}) + startFailures, stopFailures, err = s.cli.ServicesStart(context.Background(), []string{"service1.service"}, client.ClientServicesStartOptions{}) c.Check(err, ErrorMatches, "failed to stop services") c.Check(startFailures, DeepEquals, []client.ServiceFailure{ { @@ -407,7 +445,7 @@ "result": null }`)) }) - failures, err := s.cli.ServicesStop(context.Background(), []string{"service1.service", "service2.service"}) + failures, err := s.cli.ServicesStop(context.Background(), []string{"service1.service", "service2.service"}, false) c.Assert(err, IsNil) c.Check(failures, HasLen, 0) } @@ -429,7 +467,7 @@ } }`)) }) - failures, err := s.cli.ServicesStop(context.Background(), []string{"service1.service", "service2.service"}) + failures, err := s.cli.ServicesStop(context.Background(), []string{"service1.service", "service2.service"}, false) c.Assert(err, ErrorMatches, "failed to stop services") c.Check(failures, HasLen, 2) failure0 := failures[0] @@ -458,7 +496,7 @@ "result": null }`)) }) - failures, err := s.cli.ServicesRestart(context.Background(), []string{"service1.service", "service2.service"}) + failures, err := s.cli.ServicesRestart(context.Background(), []string{"service1.service", "service2.service"}, false) c.Assert(err, IsNil) c.Check(failures, HasLen, 0) } @@ -480,7 +518,7 
@@ } }`)) }) - failures, err := s.cli.ServicesRestart(context.Background(), []string{"service1.service", "service2.service"}) + failures, err := s.cli.ServicesRestart(context.Background(), []string{"service1.service", "service2.service"}, false) c.Assert(err, ErrorMatches, "failed to restart services") c.Check(failures, HasLen, 2) failure0 := failures[0] @@ -500,7 +538,7 @@ }) } -func (s *clientSuite) TestServicesRestartOrReload(c *C) { +func (s *clientSuite) TestServicesRestartWithReload(c *C) { s.handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(200) @@ -509,12 +547,12 @@ "result": null }`)) }) - failures, err := s.cli.ServicesReloadOrRestart(context.Background(), []string{"service1.service", "service2.service"}) + failures, err := s.cli.ServicesRestart(context.Background(), []string{"service1.service", "service2.service"}, true) c.Assert(err, IsNil) c.Check(failures, HasLen, 0) } -func (s *clientSuite) TestServicesRestartOrReloadFailure(c *C) { +func (s *clientSuite) TestServicesRestartWithReloadFailure(c *C) { s.handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(500) @@ -531,7 +569,7 @@ } }`)) }) - failures, err := s.cli.ServicesReloadOrRestart(context.Background(), []string{"service1.service", "service2.service"}) + failures, err := s.cli.ServicesRestart(context.Background(), []string{"service1.service", "service2.service"}, true) c.Assert(err, ErrorMatches, "failed to restart or reload services") c.Check(failures, HasLen, 2) failure0 := failures[0] @@ -681,7 +719,7 @@ s.handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddInt32(&n, 1) c.Assert(r.URL.Path, Equals, "/v1/notifications/finish-refresh") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) c.Check(err, IsNil) c.Check(string(body), DeepEquals, `{"instance-name":"some-snap"}`) }) diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/.gitignore snapd-2.63+23.10/vendor/github.com/snapcore/bolt/.gitignore --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/.gitignore 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/.gitignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -*.prof -*.test -*.swp -/bin/ diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/LICENSE snapd-2.63+23.10/vendor/github.com/snapcore/bolt/LICENSE --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/LICENSE 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Ben Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/Makefile snapd-2.63+23.10/vendor/github.com/snapcore/bolt/Makefile --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/Makefile 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -BRANCH=`git rev-parse --abbrev-ref HEAD` -COMMIT=`git rev-parse --short HEAD` -GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" - -default: build - -race: - @go test -v -race -test.run="TestSimulate_(100op|1000op)" - -# go get github.com/kisielk/errcheck -errcheck: - @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt - -test: - @go test -v -cover . - @go test -v ./cmd/bolt - -.PHONY: fmt test diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/README.md snapd-2.63+23.10/vendor/github.com/snapcore/bolt/README.md --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/README.md 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,935 +0,0 @@ -Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg) -==== - -Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] -[LMDB project][lmdb]. The goal of the project is to provide a simple, -fast, and reliable database for projects that don't require a full database -server such as Postgres or MySQL. - -Since Bolt is meant to be used as such a low-level piece of functionality, -simplicity is key. The API will be small and only focus on getting values -and setting values. That's it. - -[hyc_symas]: https://twitter.com/hyc_symas -[lmdb]: http://symas.com/mdb/ - -## Project Status - -Bolt is stable, the API is fixed, and the file format is fixed. Full unit -test coverage and randomized black box testing are used to ensure database -consistency and thread safety. Bolt is currently used in high-load production -environments serving databases as large as 1TB. Many companies such as -Shopify and Heroku use Bolt-backed services every day. - -## A message from the author - -> The original goal of Bolt was to provide a simple pure Go key/value store and to -> not bloat the code with extraneous features. To that end, the project has been -> a success. However, this limited scope also means that the project is complete. -> -> Maintaining an open source database requires an immense amount of time and energy. -> Changes to the code can have unintended and sometimes catastrophic effects so -> even simple changes require hours and hours of careful testing and validation. -> -> Unfortunately I no longer have the time or energy to continue this work. Bolt is -> in a stable state and has years of successful production use. As such, I feel that -> leaving it in its current state is the most prudent course of action. -> -> If you are interested in using a more featureful version of Bolt, I suggest that -> you look at the CoreOS fork called [bbolt](https://github.com/coreos/bbolt). 
- -- Ben Johnson ([@benbjohnson](https://twitter.com/benbjohnson)) - -## Table of Contents - -- [Getting Started](#getting-started) - - [Installing](#installing) - - [Opening a database](#opening-a-database) - - [Transactions](#transactions) - - [Read-write transactions](#read-write-transactions) - - [Read-only transactions](#read-only-transactions) - - [Batch read-write transactions](#batch-read-write-transactions) - - [Managing transactions manually](#managing-transactions-manually) - - [Using buckets](#using-buckets) - - [Using key/value pairs](#using-keyvalue-pairs) - - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) - - [Iterating over keys](#iterating-over-keys) - - [Prefix scans](#prefix-scans) - - [Range scans](#range-scans) - - [ForEach()](#foreach) - - [Nested buckets](#nested-buckets) - - [Database backups](#database-backups) - - [Statistics](#statistics) - - [Read-Only Mode](#read-only-mode) - - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) -- [Resources](#resources) -- [Comparison with other databases](#comparison-with-other-databases) - - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) - - [LevelDB, RocksDB](#leveldb-rocksdb) - - [LMDB](#lmdb) -- [Caveats & Limitations](#caveats--limitations) -- [Reading the Source](#reading-the-source) -- [Other Projects Using Bolt](#other-projects-using-bolt) - -## Getting Started - -### Installing - -To start using Bolt, install Go and run `go get`: - -```sh -$ go get github.com/boltdb/bolt/... -``` - -This will retrieve the library and install the `bolt` command line utility into -your `$GOBIN` path. - - -### Opening a database - -The top-level object in Bolt is a `DB`. It is represented as a single file on -your disk and represents a consistent snapshot of your data. - -To open your database, simply use the `bolt.Open()` function: - -```go -package main - -import ( - "log" - - "github.com/boltdb/bolt" -) - -func main() { - // Open the my.db data file in your current directory. - // It will be created if it doesn't exist. - db, err := bolt.Open("my.db", 0600, nil) - if err != nil { - log.Fatal(err) - } - defer db.Close() - - ... -} -``` - -Please note that Bolt obtains a file lock on the data file so multiple processes -cannot open the same database at the same time. Opening an already open Bolt -database will cause it to hang until the other process closes it. To prevent -an indefinite wait you can pass a timeout option to the `Open()` function: - -```go -db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) -``` - - -### Transactions - -Bolt allows only one read-write transaction at a time but allows as many -read-only transactions as you want at a time. Each transaction has a consistent -view of the data as it existed when the transaction started. - -Individual transactions and all objects created from them (e.g. buckets, keys) -are not thread safe. To work with data in multiple goroutines you must start -a transaction for each one or use locking to ensure only one goroutine accesses -a transaction at a time. Creating transaction from the `DB` is thread safe. - -Read-only transactions and read-write transactions should not depend on one -another and generally shouldn't be opened simultaneously in the same goroutine. -This can cause a deadlock as the read-write transaction needs to periodically -re-map the data file but it cannot do so while a read-only transaction is open. 
- - -#### Read-write transactions - -To start a read-write transaction, you can use the `DB.Update()` function: - -```go -err := db.Update(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Inside the closure, you have a consistent view of the database. You commit the -transaction by returning `nil` at the end. You can also rollback the transaction -at any point by returning an error. All database operations are allowed inside -a read-write transaction. - -Always check the return error as it will report any disk failures that can cause -your transaction to not complete. If you return an error within your closure -it will be passed through. - - -#### Read-only transactions - -To start a read-only transaction, you can use the `DB.View()` function: - -```go -err := db.View(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -You also get a consistent view of the database within this closure, however, -no mutating operations are allowed within a read-only transaction. You can only -retrieve buckets, retrieve values, and copy the database within a read-only -transaction. - - -#### Batch read-write transactions - -Each `DB.Update()` waits for disk to commit the writes. This overhead -can be minimized by combining multiple updates with the `DB.Batch()` -function: - -```go -err := db.Batch(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Concurrent Batch calls are opportunistically combined into larger -transactions. Batch is only useful when there are multiple goroutines -calling it. - -The trade-off is that `Batch` can call the given -function multiple times, if parts of the transaction fail. The -function must be idempotent and side effects must take effect only -after a successful return from `DB.Batch()`. - -For example: don't display messages from inside the function, instead -set variables in the enclosing scope: - -```go -var id uint64 -err := db.Batch(func(tx *bolt.Tx) error { - // Find last key in bucket, decode as bigendian uint64, increment - // by one, encode back to []byte, and add new key. - ... - id = newValue - return nil -}) -if err != nil { - return ... -} -fmt.Println("Allocated ID %d", id) -``` - - -#### Managing transactions manually - -The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` -function. These helper functions will start the transaction, execute a function, -and then safely close your transaction if an error is returned. This is the -recommended way to use Bolt transactions. - -However, sometimes you may want to manually start and end your transactions. -You can use the `DB.Begin()` function directly but **please** be sure to close -the transaction. - -```go -// Start a writable transaction. -tx, err := db.Begin(true) -if err != nil { - return err -} -defer tx.Rollback() - -// Use the transaction... -_, err := tx.CreateBucket([]byte("MyBucket")) -if err != nil { - return err -} - -// Commit the transaction and check for error. -if err := tx.Commit(); err != nil { - return err -} -``` - -The first argument to `DB.Begin()` is a boolean stating if the transaction -should be writable. - - -### Using buckets - -Buckets are collections of key/value pairs within the database. All keys in a -bucket must be unique. 
You can create a bucket using the `DB.CreateBucket()` -function: - -```go -db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("MyBucket")) - if err != nil { - return fmt.Errorf("create bucket: %s", err) - } - return nil -}) -``` - -You can also create a bucket only if it doesn't exist by using the -`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this -function for all your top-level buckets after you open your database so you can -guarantee that they exist for future transactions. - -To delete a bucket, simply call the `Tx.DeleteBucket()` function. - - -### Using key/value pairs - -To save a key/value pair to a bucket, use the `Bucket.Put()` function: - -```go -db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - err := b.Put([]byte("answer"), []byte("42")) - return err -}) -``` - -This will set the value of the `"answer"` key to `"42"` in the `MyBucket` -bucket. To retrieve this value, we can use the `Bucket.Get()` function: - -```go -db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - v := b.Get([]byte("answer")) - fmt.Printf("The answer is: %s\n", v) - return nil -}) -``` - -The `Get()` function does not return an error because its operation is -guaranteed to work (unless there is some kind of system failure). If the key -exists then it will return its byte slice value. If it doesn't exist then it -will return `nil`. It's important to note that you can have a zero-length value -set to a key which is different than the key not existing. - -Use the `Bucket.Delete()` function to delete a key from the bucket. - -Please note that values returned from `Get()` are only valid while the -transaction is open. If you need to use a value outside of the transaction -then you must use `copy()` to copy it to another byte slice. - - -### Autoincrementing integer for the bucket -By using the `NextSequence()` function, you can let Bolt determine a sequence -which can be used as the unique identifier for your key/value pairs. See the -example below. - -```go -// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. -func (s *Store) CreateUser(u *User) error { - return s.db.Update(func(tx *bolt.Tx) error { - // Retrieve the users bucket. - // This should be created when the DB is first opened. - b := tx.Bucket([]byte("users")) - - // Generate ID for the user. - // This returns an error only if the Tx is closed or not writeable. - // That can't happen in an Update() call so I ignore the error check. - id, _ := b.NextSequence() - u.ID = int(id) - - // Marshal user data into bytes. - buf, err := json.Marshal(u) - if err != nil { - return err - } - - // Persist bytes to users bucket. - return b.Put(itob(u.ID), buf) - }) -} - -// itob returns an 8-byte big endian representation of v. -func itob(v int) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, uint64(v)) - return b -} - -type User struct { - ID int - ... -} -``` - -### Iterating over keys - -Bolt stores its keys in byte-sorted order within a bucket. This makes sequential -iteration over these keys extremely fast. 
To iterate over keys we'll use a -`Cursor`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - c := b.Cursor() - - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -The cursor allows you to move to a specific point in the list of keys and move -forward or backward through the keys one at a time. - -The following functions are available on the cursor: - -``` -First() Move to the first key. -Last() Move to the last key. -Seek() Move to a specific key. -Next() Move to the next key. -Prev() Move to the previous key. -``` - -Each of those functions has a return signature of `(key []byte, value []byte)`. -When you have iterated to the end of the cursor then `Next()` will return a -`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` -before calling `Next()` or `Prev()`. If you do not seek to a position then -these functions will return a `nil` key. - -During iteration, if the key is non-`nil` but the value is `nil`, that means -the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to -access the sub-bucket. - - -#### Prefix scans - -To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - c := tx.Bucket([]byte("MyBucket")).Cursor() - - prefix := []byte("1234") - for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -#### Range scans - -Another common use case is scanning over a range such as a time range. If you -use a sortable time encoding such as RFC3339 then you can query a specific -date range like this: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume our events bucket exists and has RFC3339 encoded time keys. - c := tx.Bucket([]byte("Events")).Cursor() - - // Our time range spans the 90's decade. - min := []byte("1990-01-01T00:00:00Z") - max := []byte("2000-01-01T00:00:00Z") - - // Iterate over the 90's. - for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { - fmt.Printf("%s: %s\n", k, v) - } - - return nil -}) -``` - -Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. - - -#### ForEach() - -You can also use the function `ForEach()` if you know you'll be iterating over -all the keys in a bucket: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - b.ForEach(func(k, v []byte) error { - fmt.Printf("key=%s, value=%s\n", k, v) - return nil - }) - return nil -}) -``` - -Please note that keys and values in `ForEach()` are only valid while -the transaction is open. If you need to use a key or value outside of -the transaction, you must use `copy()` to copy it to another byte -slice. - -### Nested buckets - -You can also store a bucket in a key to create nested buckets. The API is the -same as the bucket management API on the `DB` object: - -```go -func (*Bucket) CreateBucket(key []byte) (*Bucket, error) -func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) -func (*Bucket) DeleteBucket(key []byte) error -``` - -Say you had a multi-tenant application where the root level bucket was the account bucket. 
Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings. - -```go - -// createUser creates a new user in the given account. -func createUser(accountID int, u *User) error { - // Start the transaction. - tx, err := db.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - // Retrieve the root bucket for the account. - // Assume this has already been created when the account was set up. - root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10))) - - // Setup the users bucket. - bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) - if err != nil { - return err - } - - // Generate an ID for the new user. - userID, err := bkt.NextSequence() - if err != nil { - return err - } - u.ID = userID - - // Marshal and save the encoded user. - if buf, err := json.Marshal(u); err != nil { - return err - } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { - return err - } - - // Commit the transaction. - if err := tx.Commit(); err != nil { - return err - } - - return nil -} - -``` - - - - -### Database backups - -Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` -function to write a consistent view of the database to a writer. If you call -this from a read-only transaction, it will perform a hot backup and not block -your other database reads and writes. - -By default, it will use a regular file handle which will utilize the operating -system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) -documentation for information about optimizing for larger-than-RAM datasets. - -One common use case is to backup over HTTP so you can use tools like `cURL` to -do database backups: - -```go -func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { - err := db.View(func(tx *bolt.Tx) error { - w.Header().Set("Content-Type", "application/octet-stream") - w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) - w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) - _, err := tx.WriteTo(w) - return err - }) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} -``` - -Then you can backup using this command: - -```sh -$ curl http://localhost/backup > my.db -``` - -Or you can open your browser to `http://localhost/backup` and it will download -automatically. - -If you want to backup to another file you can use the `Tx.CopyFile()` helper -function. - - -### Statistics - -The database keeps a running count of many of the internal operations it -performs so you can better understand what's going on. By grabbing a snapshot -of these stats at two points in time we can see what operations were performed -in that time range. - -For example, we could start a goroutine to log stats every 10 seconds: - -```go -go func() { - // Grab the initial stats. - prev := db.Stats() - - for { - // Wait for 10s. - time.Sleep(10 * time.Second) - - // Grab the current stats and diff them. - stats := db.Stats() - diff := stats.Sub(&prev) - - // Encode stats to JSON and print to STDERR. - json.NewEncoder(os.Stderr).Encode(diff) - - // Save stats for the next loop. - prev = stats - } -}() -``` - -It's also useful to pipe these stats to a service such as statsd for monitoring -or to provide an HTTP endpoint that will perform a fixed-length sample. 
- - -### Read-Only Mode - -Sometimes it is useful to create a shared, read-only Bolt database. To this, -set the `Options.ReadOnly` flag when opening your database. Read-only mode -uses a shared lock to allow multiple processes to read from the database but -it will block any processes from opening the database in read-write mode. - -```go -db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) -if err != nil { - log.Fatal(err) -} -``` - -### Mobile Use (iOS/Android) - -Bolt is able to run on mobile devices by leveraging the binding feature of the -[gomobile](https://github.com/golang/mobile) tool. Create a struct that will -contain your database logic and a reference to a `*bolt.DB` with a initializing -constructor that takes in a filepath where the database file will be stored. -Neither Android nor iOS require extra permissions or cleanup from using this method. - -```go -func NewBoltDB(filepath string) *BoltDB { - db, err := bolt.Open(filepath+"/demo.db", 0600, nil) - if err != nil { - log.Fatal(err) - } - - return &BoltDB{db} -} - -type BoltDB struct { - db *bolt.DB - ... -} - -func (b *BoltDB) Path() string { - return b.db.Path() -} - -func (b *BoltDB) Close() { - b.db.Close() -} -``` - -Database logic should be defined as methods on this wrapper struct. - -To initialize this struct from the native language (both platforms now sync -their local storage to the cloud. These snippets disable that functionality for the -database file): - -#### Android - -```java -String path; -if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ - path = getNoBackupFilesDir().getAbsolutePath(); -} else{ - path = getFilesDir().getAbsolutePath(); -} -Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) -``` - -#### iOS - -```objc -- (void)demo { - NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, - NSUserDomainMask, - YES) objectAtIndex:0]; - GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); - [self addSkipBackupAttributeToItemAtPath:demo.path]; - //Some DB Logic would go here - [demo close]; -} - -- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString -{ - NSURL* URL= [NSURL fileURLWithPath: filePathString]; - assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); - - NSError *error = nil; - BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] - forKey: NSURLIsExcludedFromBackupKey error: &error]; - if(!success){ - NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); - } - return success; -} - -``` - -## Resources - -For more information on getting started with Bolt, check out the following articles: - -* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). -* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville - - -## Comparison with other databases - -### Postgres, MySQL, & other relational databases - -Relational databases structure data into rows and are only accessible through -the use of SQL. This approach provides flexibility in how you store and query -your data but also incurs overhead in parsing and planning SQL statements. Bolt -accesses all data by a byte slice key. This makes Bolt fast to read and write -data by key but provides no built-in support for joining values together. 
- -Most relational databases (with the exception of SQLite) are standalone servers -that run separately from your application. This gives your systems -flexibility to connect multiple application servers to a single database -server but also adds overhead in serializing and transporting data over the -network. Bolt runs as a library included in your application so all data access -has to go through your application's process. This brings data closer to your -application but limits multi-process access to the data. - - -### LevelDB, RocksDB - -LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that -they are libraries bundled into the application, however, their underlying -structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes -random writes by using a write ahead log and multi-tiered, sorted files called -SSTables. Bolt uses a B+tree internally and only a single file. Both approaches -have trade-offs. - -If you require a high random write throughput (>10,000 w/sec) or you need to use -spinning disks then LevelDB could be a good choice. If your application is -read-heavy or does a lot of range scans then Bolt could be a good choice. - -One other important consideration is that LevelDB does not have transactions. -It supports batch writing of key/values pairs and it supports read snapshots -but it will not give you the ability to do a compare-and-swap operation safely. -Bolt supports fully serializable ACID transactions. - - -### LMDB - -Bolt was originally a port of LMDB so it is architecturally similar. Both use -a B+tree, have ACID semantics with fully serializable transactions, and support -lock-free MVCC using a single writer and multiple readers. - -The two projects have somewhat diverged. LMDB heavily focuses on raw performance -while Bolt has focused on simplicity and ease of use. For example, LMDB allows -several unsafe actions such as direct writes for the sake of performance. Bolt -opts to disallow actions which can leave the database in a corrupted state. The -only exception to this in Bolt is `DB.NoSync`. - -There are also a few differences in API. LMDB requires a maximum mmap size when -opening an `mdb_env` whereas Bolt will handle incremental mmap resizing -automatically. LMDB overloads the getter and setter functions with multiple -flags whereas Bolt splits these specialized cases into their own functions. - - -## Caveats & Limitations - -It's important to pick the right tool for the job and Bolt is no exception. -Here are a few things to note when evaluating and using Bolt: - -* Bolt is good for read intensive workloads. Sequential write performance is - also fast but random writes can be slow. You can use `DB.Batch()` or add a - write-ahead log to help mitigate this issue. - -* Bolt uses a B+tree internally so there can be a lot of random page access. - SSDs provide a significant performance boost over spinning disks. - -* Try to avoid long running read transactions. Bolt uses copy-on-write so - old pages cannot be reclaimed while an old transaction is using them. - -* Byte slices returned from Bolt are only valid during a transaction. Once the - transaction has been committed or rolled back then the memory they point to - can be reused by a new page or can be unmapped from virtual memory and you'll - see an `unexpected fault address` panic when accessing it. - -* Bolt uses an exclusive write lock on the database file so it cannot be - shared by multiple processes. - -* Be careful when using `Bucket.FillPercent`. 
Setting a high fill percent for - buckets that have random inserts will cause your database to have very poor - page utilization. - -* Use larger buckets in general. Smaller buckets causes poor page utilization - once they become larger than the page size (typically 4KB). - -* Bulk loading a lot of random writes into a new bucket can be slow as the - page will not split until the transaction is committed. Randomly inserting - more than 100,000 key/value pairs into a single new bucket in a single - transaction is not advised. - -* Bolt uses a memory-mapped file so the underlying operating system handles the - caching of the data. Typically, the OS will cache as much of the file as it - can in memory and will release memory as needed to other processes. This means - that Bolt can show very high memory usage when working with large databases. - However, this is expected and the OS will release memory as needed. Bolt can - handle databases much larger than the available physical RAM, provided its - memory-map fits in the process virtual address space. It may be problematic - on 32-bits systems. - -* The data structures in the Bolt database are memory mapped so the data file - will be endian specific. This means that you cannot copy a Bolt file from a - little endian machine to a big endian machine and have it work. For most - users this is not a concern since most modern CPUs are little endian. - -* Because of the way pages are laid out on disk, Bolt cannot truncate data files - and return free pages back to the disk. Instead, Bolt maintains a free list - of unused pages within its data file. These free pages can be reused by later - transactions. This works well for many use cases as databases generally tend - to grow. However, it's important to note that deleting large chunks of data - will not allow you to reclaim that space on disk. - - For more information on page allocation, [see this comment][page-allocation]. - -[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 - - -## Reading the Source - -Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, -transactional key/value database so it can be a good starting point for people -interested in how databases work. - -The best places to start are the main entry points into Bolt: - -- `Open()` - Initializes the reference to the database. It's responsible for - creating the database if it doesn't exist, obtaining an exclusive lock on the - file, reading the meta pages, & memory-mapping the file. - -- `DB.Begin()` - Starts a read-only or read-write transaction depending on the - value of the `writable` argument. This requires briefly obtaining the "meta" - lock to keep track of open transactions. Only one read-write transaction can - exist at a time so the "rwlock" is acquired during the life of a read-write - transaction. - -- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the - arguments, a cursor is used to traverse the B+tree to the page and position - where they key & value will be written. Once the position is found, the bucket - materializes the underlying page and the page's parent pages into memory as - "nodes". These nodes are where mutations occur during read-write transactions. - These changes get flushed to disk during commit. - -- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor - to move to the page & position of a key/value pair. 
During a read-only - transaction, the key and value data is returned as a direct reference to the - underlying mmap file so there's no allocation overhead. For read-write - transactions, this data may reference the mmap file or one of the in-memory - node values. - -- `Cursor` - This object is simply for traversing the B+tree of on-disk pages - or in-memory nodes. It can seek to a specific key, move to the first or last - value, or it can move forward or backward. The cursor handles the movement up - and down the B+tree transparently to the end user. - -- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages - into pages to be written to disk. Writing to disk then occurs in two phases. - First, the dirty pages are written to disk and an `fsync()` occurs. Second, a - new meta page with an incremented transaction ID is written and another - `fsync()` occurs. This two phase write ensures that partially written data - pages are ignored in the event of a crash since the meta page pointing to them - is never written. Partially written meta pages are invalidated because they - are written with a checksum. - -If you have additional notes that could be helpful for others, please submit -them via pull request. - - -## Other Projects Using Bolt - -Below is a list of public, open source projects that use Bolt: - -* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. -* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. -* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. -* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. -* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. -* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. -* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. -* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. -* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. -* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". -* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. -* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. -* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. -* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. -* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. -* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. -* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. -* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. 
-* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. -* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. -* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. -* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. -* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. -* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. -* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. -* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. -* [stow](https://github.com/djherbis/stow) - a persistence manager for objects - backed by boltdb. -* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining - simple tx and key scans. -* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. -* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service -* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. -* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. -* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. -* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB. -* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. -* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings. -* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. -* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. -* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. -* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. -* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains -* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. -* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. -* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. 
-* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies -* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB -* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework. - -If you are using Bolt in a project please send a pull request to add it to the list. diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/appveyor.yml snapd-2.63+23.10/vendor/github.com/snapcore/bolt/appveyor.yml --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/appveyor.yml 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -version: "{build}" - -os: Windows Server 2012 R2 - -clone_folder: c:\gopath\src\github.com\boltdb\bolt - -environment: - GOPATH: c:\gopath - -install: - - echo %PATH% - - echo %GOPATH% - - go version - - go env - - go get -v -t ./... - -build_script: - - go test -v ./... diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_386.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_386.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_386.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_386.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_amd64.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_amd64.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_amd64.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_amd64.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_arm.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_arm.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_arm.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_arm.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -package bolt - -import "unsafe" - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned bool - -func init() { - // Simple check to see whether this arch handles unaligned load/stores - // correctly. - - // ARM9 and older devices require load/stores to be from/to aligned - // addresses. If not, the lower 2 bits are cleared and that address is - // read in a jumbled up order. 
- - // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html - - raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} - val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) - - brokenUnaligned = val != 0x11222211 -} diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_arm64.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_arm64.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_arm64.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_arm64.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -// +build arm64 - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_linux.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_linux.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_linux.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_linux.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -package bolt - -import ( - "syscall" -) - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return syscall.Fdatasync(int(db.file.Fd())) -} diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_openbsd.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_openbsd.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_openbsd.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_openbsd.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -package bolt - -import ( - "syscall" - "unsafe" -) - -const ( - msAsync = 1 << iota // perform asynchronous writes - msSync // perform synchronous writes - msInvalidate // invalidate cached data -) - -func msync(db *DB) error { - _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) - if errno != 0 { - return errno - } - return nil -} - -func fdatasync(db *DB) error { - if db.data != nil { - return msync(db) - } - return db.file.Sync() -} diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_ppc.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_ppc.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_ppc.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_ppc.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -// +build ppc - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_ppc64.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_ppc64.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_ppc64.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_ppc64.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -// +build ppc64 - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. 
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_ppc64le.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_ppc64le.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_ppc64le.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_ppc64le.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -// +build ppc64le - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_riscv64.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_riscv64.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_riscv64.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_riscv64.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -// +build riscv64 - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_s390x.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_s390x.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_s390x.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_s390x.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -// +build s390x - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_unix.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_unix.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_unix.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_unix.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,89 +0,0 @@ -// +build !windows,!plan9,!solaris - -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - flag := syscall.LOCK_SH - if exclusive { - flag = syscall.LOCK_EX - } - - // Otherwise attempt to obtain an exclusive lock. - err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) - if err == nil { - return nil - } else if err != syscall.EWOULDBLOCK { - return err - } - - // Wait for a bit and try again. 
- time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. - db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. - err := syscall.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} - -// NOTE: This function is copied from stdlib because it is not available on darwin. -func madvise(b []byte, advice int) (err error) { - _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = e1 - } - return -} diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_unix_solaris.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_unix_solaris.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_unix_solaris.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_unix_solaris.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/unix" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Pid = 0 - lock.Whence = 0 - lock.Pid = 0 - if exclusive { - lock.Type = syscall.F_WRLCK - } else { - lock.Type = syscall.F_RDLCK - } - err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) - if err == nil { - return nil - } else if err != syscall.EAGAIN { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Type = syscall.F_UNLCK - lock.Whence = 0 - return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. 
- db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. - err := unix.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_windows.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_windows.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bolt_windows.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bolt_windows.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,144 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" -) - -// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procLockFileEx = modkernel32.NewProc("LockFileEx") - procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") -) - -const ( - lockExt = ".lock" - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx - flagLockExclusive = 2 - flagLockFailImmediately = 1 - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx - errLockViolation syscall.Errno = 0x21 -) - -func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) - if r == 0 { - return err - } - return nil -} - -func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) - if r == 0 { - return err - } - return nil -} - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - // Create a separate lock file on windows because a process - // cannot share an exclusive lock on the same file. This is - // needed during Tx.WriteTo(). - f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) - if err != nil { - return err - } - db.lockfile = f - - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - - var flag uint32 = flagLockFailImmediately - if exclusive { - flag |= flagLockExclusive - } - - err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) - if err == nil { - return nil - } else if err != errLockViolation { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) - db.lockfile.Close() - os.Remove(db.path + lockExt) - return err -} - -// mmap memory maps a DB's data file. 
-// Based on: https://github.com/edsrzf/mmap-go -func mmap(db *DB, sz int) error { - if !db.readOnly { - // Truncate the database to the size of the mmap. - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("truncate: %s", err) - } - } - - // Open a file mapping handle. - sizelo := uint32(sz >> 32) - sizehi := uint32(sz) & 0xffffffff - h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) - if h == 0 { - return os.NewSyscallError("CreateFileMapping", errno) - } - - // Create the memory map. - addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) - if addr == 0 { - return os.NewSyscallError("MapViewOfFile", errno) - } - - // Close mapping handle. - if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { - return os.NewSyscallError("CloseHandle", err) - } - - // Convert to a byte array. - db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) - db.datasz = sz - - return nil -} - -// munmap unmaps a pointer from a file. -// Based on: https://github.com/edsrzf/mmap-go -func munmap(db *DB) error { - if db.data == nil { - return nil - } - - addr := (uintptr)(unsafe.Pointer(&db.data[0])) - if err := syscall.UnmapViewOfFile(addr); err != nil { - return os.NewSyscallError("UnmapViewOfFile", err) - } - return nil -} diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/boltsync_unix.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/boltsync_unix.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/boltsync_unix.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/boltsync_unix.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -// +build !windows,!plan9,!linux,!openbsd - -package bolt - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bucket.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bucket.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/bucket.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/bucket.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,777 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "unsafe" -) - -const ( - // MaxKeySize is the maximum length of a key, in bytes. - MaxKeySize = 32768 - - // MaxValueSize is the maximum length of a value, in bytes. - MaxValueSize = (1 << 31) - 2 -) - -const ( - maxUint = ^uint(0) - minUint = 0 - maxInt = int(^uint(0) >> 1) - minInt = -maxInt - 1 -) - -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - -const ( - minFillPercent = 0.1 - maxFillPercent = 1.0 -) - -// DefaultFillPercent is the percentage that split pages are filled. -// This value can be changed by setting Bucket.FillPercent. -const DefaultFillPercent = 0.5 - -// Bucket represents a collection of key/value pairs inside the database. -type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. - nodes map[pgid]*node // node cache - - // Sets the threshold for filling nodes when they split. By default, - // the bucket will fill to 50% but it can be useful to increase this - // amount if you know that your write workloads are mostly append-only. - // - // This is non-persisted across transactions so it must be set in every Tx. 
- FillPercent float64 -} - -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - -// newBucket returns a new bucket associated with a transaction. -func newBucket(tx *Tx) Bucket { - var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} - if tx.writable { - b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) - } - return b -} - -// Tx returns the tx of the bucket. -func (b *Bucket) Tx() *Tx { - return b.tx -} - -// Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root -} - -// Writable returns whether the bucket is writable. -func (b *Bucket) Writable() bool { - return b.tx.writable -} - -// Cursor creates a cursor associated with the bucket. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (b *Bucket) Cursor() *Cursor { - // Update transaction statistics. - b.tx.stats.CursorCount++ - - // Allocate and return a cursor. - return &Cursor{ - bucket: b, - stack: make([]elemRef, 0), - } -} - -// Bucket retrieves a nested bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) Bucket(name []byte) *Bucket { - if b.buckets != nil { - if child := b.buckets[string(name)]; child != nil { - return child - } - } - - // Move cursor to key. - c := b.Cursor() - k, v, flags := c.seek(name) - - // Return nil if the key doesn't exist or it is not a bucket. - if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { - return nil - } - - // Otherwise create a bucket and cache it. - var child = b.openBucket(v) - if b.buckets != nil { - b.buckets[string(name)] = child - } - - return child -} - -// Helper method that re-interprets a sub-bucket value -// from a parent into a Bucket -func (b *Bucket) openBucket(value []byte) *Bucket { - var child = newBucket(b.tx) - - // If unaligned load/stores are broken on this arch and value is - // unaligned simply clone to an aligned byte array. - unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 - - if unaligned { - value = cloneBytes(value) - } - - // If this is a writable transaction then we need to copy the bucket entry. - // Read-only transactions can point directly at the mmap entry. - if b.tx.writable && !unaligned { - child.bucket = &bucket{} - *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) - } else { - child.bucket = (*bucket)(unsafe.Pointer(&value[0])) - } - - // Save a reference to the inline page if the bucket is inline. - if child.root == 0 { - child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - } - - return &child -} - -// CreateBucket creates a new bucket at the given key and returns the new bucket. -// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. 
-func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { - if b.tx.db == nil { - return nil, ErrTxClosed - } else if !b.tx.writable { - return nil, ErrTxNotWritable - } else if len(key) == 0 { - return nil, ErrBucketNameRequired - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if there is an existing key. - if bytes.Equal(key, k) { - if (flags & bucketLeafFlag) != 0 { - return nil, ErrBucketExists - } - return nil, ErrIncompatibleValue - } - - // Create empty, inline bucket. - var bucket = Bucket{ - bucket: &bucket{}, - rootNode: &node{isLeaf: true}, - FillPercent: DefaultFillPercent, - } - var value = bucket.write() - - // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, bucketLeafFlag) - - // Since subbuckets are not allowed on inline buckets, we need to - // dereference the inline page, if it exists. This will cause the bucket - // to be treated as a regular, non-inline bucket for the rest of the tx. - b.page = nil - - return b.Bucket(key), nil -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { - child, err := b.CreateBucket(key) - if err == ErrBucketExists { - return b.Bucket(key), nil - } else if err != nil { - return nil, err - } - return child, nil -} - -// DeleteBucket deletes a bucket at the given key. -// Returns an error if the bucket does not exists, or if the key represents a non-bucket value. -func (b *Bucket) DeleteBucket(key []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { - return ErrBucketNotFound - } else if (flags & bucketLeafFlag) == 0 { - return ErrIncompatibleValue - } - - // Recursively delete all child buckets. - child := b.Bucket(key) - err := child.ForEach(func(k, v []byte) error { - if v == nil { - if err := child.DeleteBucket(k); err != nil { - return fmt.Errorf("delete bucket: %s", err) - } - } - return nil - }) - if err != nil { - return err - } - - // Remove cached copy. - delete(b.buckets, string(key)) - - // Release all bucket pages to freelist. - child.nodes = nil - child.rootNode = nil - child.free() - - // Delete the node if we have a matching key. - c.node().del(key) - - return nil -} - -// Get retrieves the value for a key in the bucket. -// Returns a nil value if the key does not exist or if the key is a nested bucket. -// The returned value is only valid for the life of the transaction. -func (b *Bucket) Get(key []byte) []byte { - k, v, flags := b.Cursor().seek(key) - - // Return nil if this is a bucket. - if (flags & bucketLeafFlag) != 0 { - return nil - } - - // If our target node isn't the same key as what's passed in then return nil. - if !bytes.Equal(key, k) { - return nil - } - return v -} - -// Put sets the value for a key in the bucket. -// If the key exist then its previous value will be overwritten. -// Supplied value must remain valid for the life of the transaction. 
-// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. -func (b *Bucket) Put(key []byte, value []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } else if len(key) == 0 { - return ErrKeyRequired - } else if len(key) > MaxKeySize { - return ErrKeyTooLarge - } else if int64(len(value)) > MaxValueSize { - return ErrValueTooLarge - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if there is an existing key with a bucket value. - if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - - // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, 0) - - return nil -} - -// Delete removes a key from the bucket. -// If the key does not exist then nothing is done and a nil error is returned. -// Returns an error if the bucket was created from a read-only transaction. -func (b *Bucket) Delete(key []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Move cursor to correct position. - c := b.Cursor() - _, _, flags := c.seek(key) - - // Return an error if there is already existing bucket value. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - - // Delete the node if we have a matching key. - c.node().del(key) - - return nil -} - -// Sequence returns the current integer for the bucket without incrementing it. -func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } - -// SetSequence updates the sequence number for the bucket. -func (b *Bucket) SetSequence(v uint64) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Materialize the root node if it hasn't been already so that the - // bucket will be saved during commit. - if b.rootNode == nil { - _ = b.node(b.root, nil) - } - - // Increment and return the sequence. - b.bucket.sequence = v - return nil -} - -// NextSequence returns an autoincrementing integer for the bucket. -func (b *Bucket) NextSequence() (uint64, error) { - if b.tx.db == nil { - return 0, ErrTxClosed - } else if !b.Writable() { - return 0, ErrTxNotWritable - } - - // Materialize the root node if it hasn't been already so that the - // bucket will be saved during commit. - if b.rootNode == nil { - _ = b.node(b.root, nil) - } - - // Increment and return the sequence. - b.bucket.sequence++ - return b.bucket.sequence, nil -} - -// ForEach executes a function for each key/value pair in a bucket. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. The provided function must not modify -// the bucket; this will result in undefined behavior. -func (b *Bucket) ForEach(fn func(k, v []byte) error) error { - if b.tx.db == nil { - return ErrTxClosed - } - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if err := fn(k, v); err != nil { - return err - } - } - return nil -} - -// Stat returns stats on a bucket. 
-func (b *Bucket) Stats() BucketStats { - var s, subStats BucketStats - pageSize := b.tx.db.pageSize - s.BucketN += 1 - if b.root == 0 { - s.InlineBucketN += 1 - } - b.forEachPage(func(p *page, depth int) { - if (p.flags & leafPageFlag) != 0 { - s.KeyN += int(p.count) - - // used totals the used bytes for the page - used := pageHeaderSize - - if p.count != 0 { - // If page has any elements, add all element headers. - used += leafPageElementSize * int(p.count-1) - - // Add all element key, value sizes. - // The computation takes advantage of the fact that the position - // of the last element's key/value equals to the total of the sizes - // of all previous elements' keys and values. - // It also includes the last element's header. - lastElement := p.leafPageElement(p.count - 1) - used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) - } - - if b.root == 0 { - // For inlined bucket just update the inline stats - s.InlineBucketInuse += used - } else { - // For non-inlined bucket update all the leaf stats - s.LeafPageN++ - s.LeafInuse += used - s.LeafOverflowN += int(p.overflow) - - // Collect stats from sub-buckets. - // Do that by iterating over all element headers - // looking for the ones with the bucketLeafFlag. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - if (e.flags & bucketLeafFlag) != 0 { - // For any bucket element, open the element value - // and recursively call Stats on the contained bucket. - subStats.Add(b.openBucket(e.value()).Stats()) - } - } - } - } else if (p.flags & branchPageFlag) != 0 { - s.BranchPageN++ - lastElement := p.branchPageElement(p.count - 1) - - // used totals the used bytes for the page - // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) - - // Add size of all keys and values. - // Again, use the fact that last element's position equals to - // the total of key, value sizes of all previous elements. - used += int(lastElement.pos + lastElement.ksize) - s.BranchInuse += used - s.BranchOverflowN += int(p.overflow) - } - - // Keep track of maximum page depth. - if depth+1 > s.Depth { - s.Depth = (depth + 1) - } - }) - - // Alloc stats can be computed from page counts and pageSize. - s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize - s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize - - // Add the max depth of sub-buckets to get total nested depth. - s.Depth += subStats.Depth - // Add the stats for all sub-buckets - s.Add(subStats) - return s -} - -// forEachPage iterates over every page in a bucket, including inline pages. -func (b *Bucket) forEachPage(fn func(*page, int)) { - // If we have an inline page then just use that. - if b.page != nil { - fn(b.page, 0) - return - } - - // Otherwise traverse the page hierarchy. - b.tx.forEachPage(b.root, 0, fn) -} - -// forEachPageNode iterates over every page (or node) in a bucket. -// This also includes inline pages. -func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { - // If we have an inline page or root node then just use that. - if b.page != nil { - fn(b.page, nil, 0) - return - } - b._forEachPageNode(b.root, 0, fn) -} - -func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { - var p, n = b.pageNode(pgid) - - // Execute function. - fn(p, n, depth) - - // Recursively loop over children. 
- if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) - } - } - } else { - if !n.isLeaf { - for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) - } - } - } -} - -// spill writes all the nodes for this bucket to dirty pages. -func (b *Bucket) spill() error { - // Spill all child buckets first. - for name, child := range b.buckets { - // If the child bucket is small enough and it has no child buckets then - // write it inline into the parent bucket's page. Otherwise spill it - // like a normal bucket and make the parent value a pointer to the page. - var value []byte - if child.inlineable() { - child.free() - value = child.write() - } else { - if err := child.spill(); err != nil { - return err - } - - // Update the child bucket header in this bucket. - value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket - } - - // Skip writing the bucket if there are no materialized nodes. - if child.rootNode == nil { - continue - } - - // Update parent node. - var c = b.Cursor() - k, _, flags := c.seek([]byte(name)) - if !bytes.Equal([]byte(name), k) { - panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) - } - if flags&bucketLeafFlag == 0 { - panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) - } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) - } - - // Ignore if there's not a materialized root node. - if b.rootNode == nil { - return nil - } - - // Spill nodes. - if err := b.rootNode.spill(); err != nil { - return err - } - b.rootNode = b.rootNode.root() - - // Update the root node for this bucket. - if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) - } - b.root = b.rootNode.pgid - - return nil -} - -// inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. -func (b *Bucket) inlineable() bool { - var n = b.rootNode - - // Bucket must only contain a single leaf node. - if n == nil || !n.isLeaf { - return false - } - - // Bucket is not inlineable if it contains subbuckets or if it goes beyond - // our threshold for inline bucket size. - var size = pageHeaderSize - for _, inode := range n.inodes { - size += leafPageElementSize + len(inode.key) + len(inode.value) - - if inode.flags&bucketLeafFlag != 0 { - return false - } else if size > b.maxInlineBucketSize() { - return false - } - } - - return true -} - -// Returns the maximum total size of a bucket to make it a candidate for inlining. -func (b *Bucket) maxInlineBucketSize() int { - return b.tx.db.pageSize / 4 -} - -// write allocates and writes a bucket to a byte slice. -func (b *Bucket) write() []byte { - // Allocate the appropriate size. - var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) - - // Write a bucket header. - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket - - // Convert byte slice to a fake page and write the root node. - var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - n.write(p) - - return value -} - -// rebalance attempts to balance all nodes. 
-func (b *Bucket) rebalance() { - for _, n := range b.nodes { - n.rebalance() - } - for _, child := range b.buckets { - child.rebalance() - } -} - -// node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgid pgid, parent *node) *node { - _assert(b.nodes != nil, "nodes map expected") - - // Retrieve node if it's already been created. - if n := b.nodes[pgid]; n != nil { - return n - } - - // Otherwise create a node and cache it. - n := &node{bucket: b, parent: parent} - if parent == nil { - b.rootNode = n - } else { - parent.children = append(parent.children, n) - } - - // Use the inline page if this is an inline bucket. - var p = b.page - if p == nil { - p = b.tx.page(pgid) - } - - // Read the page into the node and cache it. - n.read(p) - b.nodes[pgid] = n - - // Update statistics. - b.tx.stats.NodeCount++ - - return n -} - -// free recursively frees all pages in the bucket. -func (b *Bucket) free() { - if b.root == 0 { - return - } - - var tx = b.tx - b.forEachPageNode(func(p *page, n *node, _ int) { - if p != nil { - tx.db.freelist.free(tx.meta.txid, p) - } else { - n.free() - } - }) - b.root = 0 -} - -// dereference removes all references to the old mmap. -func (b *Bucket) dereference() { - if b.rootNode != nil { - b.rootNode.root().dereference() - } - - for _, child := range b.buckets { - child.dereference() - } -} - -// pageNode returns the in-memory node, if it exists. -// Otherwise returns the underlying page. -func (b *Bucket) pageNode(id pgid) (*page, *node) { - // Inline buckets have a fake page embedded in their value so treat them - // differently. We'll return the rootNode (if available) or the fake page. - if b.root == 0 { - if id != 0 { - panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) - } - if b.rootNode != nil { - return nil, b.rootNode - } - return b.page, nil - } - - // Check the node cache for non-inline buckets. - if b.nodes != nil { - if n := b.nodes[id]; n != nil { - return nil, n - } - } - - // Finally lookup the page from the transaction if no node is materialized. - return b.tx.page(id), nil -} - -// BucketStats records statistics about resources used by a bucket. -type BucketStats struct { - // Page count statistics. - BranchPageN int // number of logical branch pages - BranchOverflowN int // number of physical branch overflow pages - LeafPageN int // number of logical leaf pages - LeafOverflowN int // number of physical leaf overflow pages - - // Tree statistics. - KeyN int // number of keys/value pairs - Depth int // number of levels in B+tree - - // Page size utilization. 
- BranchAlloc int // bytes allocated for physical branch pages - BranchInuse int // bytes actually used for branch data - LeafAlloc int // bytes allocated for physical leaf pages - LeafInuse int // bytes actually used for leaf data - - // Bucket statistics - BucketN int // total number of buckets including the top bucket - InlineBucketN int // total number on inlined buckets - InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) -} - -func (s *BucketStats) Add(other BucketStats) { - s.BranchPageN += other.BranchPageN - s.BranchOverflowN += other.BranchOverflowN - s.LeafPageN += other.LeafPageN - s.LeafOverflowN += other.LeafOverflowN - s.KeyN += other.KeyN - if s.Depth < other.Depth { - s.Depth = other.Depth - } - s.BranchAlloc += other.BranchAlloc - s.BranchInuse += other.BranchInuse - s.LeafAlloc += other.LeafAlloc - s.LeafInuse += other.LeafInuse - - s.BucketN += other.BucketN - s.InlineBucketN += other.InlineBucketN - s.InlineBucketInuse += other.InlineBucketInuse -} - -// cloneBytes returns a copy of a given slice. -func cloneBytes(v []byte) []byte { - var clone = make([]byte, len(v)) - copy(clone, v) - return clone -} diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/cursor.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/cursor.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/cursor.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/cursor.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,400 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "sort" -) - -// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. -// Cursors see nested buckets with value == nil. -// Cursors can be obtained from a transaction and are valid as long as the transaction is open. -// -// Keys and values returned from the cursor are only valid for the life of the transaction. -// -// Changing data while traversing with a cursor may cause it to be invalidated -// and return unexpected keys and/or values. You must reposition your cursor -// after mutating data. -type Cursor struct { - bucket *Bucket - stack []elemRef -} - -// Bucket returns the bucket that this cursor was created from. -func (c *Cursor) Bucket() *Bucket { - return c.bucket -} - -// First moves the cursor to the first item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - c.first() - - // If we land on an empty page then move to the next value. - // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - c.next() - } - - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v - -} - -// Last moves the cursor to the last item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. 
-func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - ref := elemRef{page: p, node: n} - ref.index = ref.count() - 1 - c.stack = append(c.stack, ref) - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Next moves the cursor to the next item in the bucket and returns its key and value. -// If the cursor is at the end of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Prev moves the cursor to the previous item in the bucket and returns its key and value. -// If the cursor is at the beginning of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Attempt to move back one element until we're successful. - // Move up the stack as we hit the beginning of each page in our stack. - for i := len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index > 0 { - elem.index-- - break - } - c.stack = c.stack[:i] - } - - // If we've hit the end then return nil. - if len(c.stack) == 0 { - return nil, nil - } - - // Move down the stack to find the last element of the last leaf under this branch. - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. If no keys -// follow, a nil key is returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - k, v, flags := c.seek(seek) - - // If we ended up after the last element of a page then move to the next one. - if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { - k, v, flags = c.next() - } - - if k == nil { - return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Delete removes the current key/value under the cursor from the bucket. -// Delete fails if current key/value is a bucket or if the transaction is not writable. -func (c *Cursor) Delete() error { - if c.bucket.tx.db == nil { - return ErrTxClosed - } else if !c.bucket.Writable() { - return ErrTxNotWritable - } - - key, _, flags := c.keyValue() - // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - c.node().del(key) - - return nil -} - -// seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. -func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Start from root page/node and traverse to correct page. - c.stack = c.stack[:0] - c.search(seek, c.bucket.root) - ref := &c.stack[len(c.stack)-1] - - // If the cursor is pointing to the end of page/node then return nil. - if ref.index >= ref.count() { - return nil, nil, 0 - } - - // If this is a bucket then return a nil value. 
- return c.keyValue() -} - -// first moves the cursor to the first leaf element under the last page in the stack. -func (c *Cursor) first() { - for { - // Exit when we hit a leaf page. - var ref = &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the first element to the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - } -} - -// last moves the cursor to the last leaf element under the last page in the stack. -func (c *Cursor) last() { - for { - // Exit when we hit a leaf page. - ref := &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the last element in the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - - var nextRef = elemRef{page: p, node: n} - nextRef.index = nextRef.count() - 1 - c.stack = append(c.stack, nextRef) - } -} - -// next moves to the next leaf element and returns the key and value. -// If the cursor is at the last leaf element then it stays there and returns nil. -func (c *Cursor) next() (key []byte, value []byte, flags uint32) { - for { - // Attempt to move over one element until we're successful. - // Move up the stack as we hit the end of each page in our stack. - var i int - for i = len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index < elem.count()-1 { - elem.index++ - break - } - } - - // If we've hit the root page then stop and return. This will leave the - // cursor on the last element of the last page. - if i == -1 { - return nil, nil, 0 - } - - // Otherwise start from where we left off in the stack and find the - // first element of the first leaf page. - c.stack = c.stack[:i+1] - c.first() - - // If this is an empty page then restart and move back up the stack. - // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - continue - } - - return c.keyValue() - } -} - -// search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgid pgid) { - p, n := c.bucket.pageNode(pgid) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) - } - e := elemRef{page: p, node: n} - c.stack = append(c.stack, e) - - // If we're on a leaf page/node then find the specific node. - if e.isLeaf() { - c.nsearch(key) - return - } - - if n != nil { - c.searchNode(key, n) - return - } - c.searchPage(key, p) -} - -func (c *Cursor) searchNode(key []byte, n *node) { - var exact bool - index := sort.Search(len(n.inodes), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(n.inodes[i].key, key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) -} - -func (c *Cursor) searchPage(key []byte, p *page) { - // Binary search for the correct range. 
- inodes := p.branchPageElements() - - var exact bool - index := sort.Search(int(p.count), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, inodes[index].pgid) -} - -// nsearch searches the leaf node on the top of the stack for a key. -func (c *Cursor) nsearch(key []byte) { - e := &c.stack[len(c.stack)-1] - p, n := e.page, e.node - - // If we have a node then search its inodes. - if n != nil { - index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 - }) - e.index = index - return - } - - // If we have a page then search its leaf elements. - inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 - }) - e.index = index -} - -// keyValue returns the key and value of the current leaf element. -func (c *Cursor) keyValue() ([]byte, []byte, uint32) { - ref := &c.stack[len(c.stack)-1] - if ref.count() == 0 || ref.index >= ref.count() { - return nil, nil, 0 - } - - // Retrieve value from node. - if ref.node != nil { - inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags - } - - // Or retrieve value from page. - elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags -} - -// node returns the node that the cursor is currently positioned on. -func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") - - // If the top of the stack is a leaf node then just return it. - if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { - return ref.node - } - - // Start from root and traverse down the hierarchy. - var n = c.stack[0].node - if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) - } - for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") - n = n.childAt(int(ref.index)) - } - _assert(n.isLeaf, "expected leaf node") - return n -} - -// elemRef represents a reference to an element on a given page/node. -type elemRef struct { - page *page - node *node - index int -} - -// isLeaf returns whether the ref is pointing at a leaf page/node. -func (r *elemRef) isLeaf() bool { - if r.node != nil { - return r.node.isLeaf - } - return (r.page.flags & leafPageFlag) != 0 -} - -// count returns the number of inodes or page elements. -func (r *elemRef) count() int { - if r.node != nil { - return len(r.node.inodes) - } - return int(r.page.count) -} diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/db.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/db.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/db.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/db.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1037 +0,0 @@ -package bolt - -import ( - "errors" - "fmt" - "hash/fnv" - "log" - "os" - "runtime" - "runtime/debug" - "strings" - "sync" - "time" - "unsafe" -) - -// The largest step that can be taken when remapping the mmap. -const maxMmapStep = 1 << 30 // 1GB - -// The data file format version. 
-const version = 2 - -// Represents a marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when -// syncing changes to a file. This is required as some operating systems, -// such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronized using the msync(2) syscall. -const IgnoreNoSync = runtime.GOOS == "openbsd" - -// Default values if not set in a DB instance. -const ( - DefaultMaxBatchSize int = 1000 - DefaultMaxBatchDelay = 10 * time.Millisecond - DefaultAllocSize = 16 * 1024 * 1024 -) - -// default page size for db is set to the OS page size. -var defaultPageSize = os.Getpagesize() - -// DB represents a collection of buckets persisted to a file on disk. -// All data access is performed through transactions which can be obtained through the DB. -// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. -type DB struct { - // When enabled, the database will perform a Check() after every commit. - // A panic is issued if the database is in an inconsistent state. This - // flag has a large performance impact so it should only be used for - // debugging purposes. - StrictMode bool - - // Setting the NoSync flag will cause the database to skip fsync() - // calls after each commit. This can be useful when bulk loading data - // into a database and you can restart the bulk load in the event of - // a system failure or database corruption. Do not set this flag for - // normal use. - // - // If the package global IgnoreNoSync constant is true, this value is - // ignored. See the comment on that constant for more details. - // - // THIS IS UNSAFE. PLEASE USE WITH CAUTION. - NoSync bool - - // When true, skips the truncate call when growing the database. - // Setting this to true is only safe on non-ext3/ext4 systems. - // Skipping truncation avoids preallocation of hard drive space and - // bypasses a truncate() and fsync() syscall on remapping. - // - // https://github.com/boltdb/bolt/issues/284 - NoGrowSync bool - - // If you want to read the entire database fast, you can set MmapFlag to - // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. - MmapFlags int - - // MaxBatchSize is the maximum size of a batch. Default value is - // copied from DefaultMaxBatchSize in Open. - // - // If <=0, disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchSize int - - // MaxBatchDelay is the maximum delay before a batch starts. - // Default value is copied from DefaultMaxBatchDelay in Open. - // - // If <=0, effectively disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchDelay time.Duration - - // AllocSize is the amount of space allocated when the database - // needs to create new pages. This is done to amortize the cost - // of truncate() and fsync() when growing the data file. - AllocSize int - - path string - file *os.File - lockfile *os.File // windows only - dataref []byte // mmap'ed readonly, write throws SEGV - data *[maxMapSize]byte - datasz int - filesz int // current on disk file size - meta0 *meta - meta1 *meta - pageSize int - opened bool - rwtx *Tx - txs []*Tx - freelist *freelist - stats Stats - - pagePool sync.Pool - - batchMu sync.Mutex - batch *batch - - rwlock sync.Mutex // Allows only one writer at a time. - metalock sync.Mutex // Protects meta page access. - mmaplock sync.RWMutex // Protects mmap access during remapping. 
- statlock sync.RWMutex // Protects stats access. - - ops struct { - writeAt func(b []byte, off int64) (n int, err error) - } - - // Read only mode. - // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. - readOnly bool -} - -// Path returns the path to currently open database file. -func (db *DB) Path() string { - return db.path -} - -// GoString returns the Go string representation of the database. -func (db *DB) GoString() string { - return fmt.Sprintf("bolt.DB{path:%q}", db.path) -} - -// String returns the string representation of the database. -func (db *DB) String() string { - return fmt.Sprintf("DB<%q>", db.path) -} - -// Open creates and opens a database at the given path. -// If the file does not exist then it will be created automatically. -// Passing in nil options will cause Bolt to open the database with the default options. -func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - var db = &DB{opened: true} - - // Set default options if no options are provided. - if options == nil { - options = DefaultOptions - } - db.NoGrowSync = options.NoGrowSync - db.MmapFlags = options.MmapFlags - - // Set default values for later DB operations. - db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - db.AllocSize = DefaultAllocSize - - flag := os.O_RDWR - if options.ReadOnly { - flag = os.O_RDONLY - db.readOnly = true - } - - // Open data file and separate sync handler for metadata writes. - db.path = path - var err error - if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { - _ = db.close() - return nil, err - } - - // Lock file so that other processes using Bolt in read-write mode cannot - // use the database at the same time. This would cause corruption since - // the two processes would write meta pages and free pages separately. - // The database file is locked exclusively (only one process can grab the lock) - // if !options.ReadOnly. - // The database file is locked using the shared lock (more than one process may - // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { - _ = db.close() - return nil, err - } - - // Default values for test hooks - db.ops.writeAt = db.file.WriteAt - - // Initialize the database if it doesn't exist. - if info, err := db.file.Stat(); err != nil { - return nil, err - } else if info.Size() == 0 { - // Initialize new files with meta pages. - if err := db.init(); err != nil { - return nil, err - } - } else { - // Read the first meta page to determine the page size. - var buf [0x1000]byte - if _, err := db.file.ReadAt(buf[:], 0); err == nil { - m := db.pageInBuffer(buf[:], 0).meta() - if err := m.validate(); err != nil { - // If we can't read the page size, we can assume it's the same - // as the OS -- since that's how the page size was chosen in the - // first place. - // - // If the first page is invalid and this OS uses a different - // page size than what the database was created with then we - // are out of luck and cannot access the database. - db.pageSize = os.Getpagesize() - } else { - db.pageSize = int(m.pageSize) - } - } - } - - // Initialize page pool. - db.pagePool = sync.Pool{ - New: func() interface{} { - return make([]byte, db.pageSize) - }, - } - - // Memory map the data file. - if err := db.mmap(options.InitialMmapSize); err != nil { - _ = db.close() - return nil, err - } - - // Read in the freelist. 
- db.freelist = newFreelist() - db.freelist.read(db.page(db.meta().freelist)) - - // Mark the database as opened and return. - return db, nil -} - -// mmap opens the underlying memory-mapped file and initializes the meta references. -// minsz is the minimum size that the new mmap can be. -func (db *DB) mmap(minsz int) error { - db.mmaplock.Lock() - defer db.mmaplock.Unlock() - - info, err := db.file.Stat() - if err != nil { - return fmt.Errorf("mmap stat error: %s", err) - } else if int(info.Size()) < db.pageSize*2 { - return fmt.Errorf("file size too small") - } - - // Ensure the size is at least the minimum size. - var size = int(info.Size()) - if size < minsz { - size = minsz - } - size, err = db.mmapSize(size) - if err != nil { - return err - } - - // Dereference all mmap references before unmapping. - if db.rwtx != nil { - db.rwtx.root.dereference() - } - - // Unmap existing data before continuing. - if err := db.munmap(); err != nil { - return err - } - - // Memory-map the data file as a byte slice. - if err := mmap(db, size); err != nil { - return err - } - - // Save references to the meta pages. - db.meta0 = db.page(0).meta() - db.meta1 = db.page(1).meta() - - // Validate the meta pages. We only return an error if both meta pages fail - // validation, since meta0 failing validation means that it wasn't saved - // properly -- but we can recover using meta1. And vice-versa. - err0 := db.meta0.validate() - err1 := db.meta1.validate() - if err0 != nil && err1 != nil { - return err0 - } - - return nil -} - -// munmap unmaps the data file from memory. -func (db *DB) munmap() error { - if err := munmap(db); err != nil { - return fmt.Errorf("unmap error: " + err.Error()) - } - return nil -} - -// mmapSize determines the appropriate size for the mmap given the current size -// of the database. The minimum size is 32KB and doubles until it reaches 1GB. -// Returns an error if the new mmap size is greater than the max allowed. -func (db *DB) mmapSize(size int) (int, error) { - // Double the size from 32KB until 1GB. - for i := uint(15); i <= 30; i++ { - if size <= 1<<i { - return 1 << i, nil - } - } - - // Verify the requested size is not above the maximum allowed. - if size > maxMapSize { - return 0, fmt.Errorf("mmap too large") - } - - // If larger than 1GB then grow by 1GB at a time. - sz := int64(size) - if remainder := sz % int64(maxMmapStep); remainder > 0 { - sz += int64(maxMmapStep) - remainder - } - - // Ensure that the mmap size is a multiple of the page size. - // This should always be true since we're incrementing in MBs. - pageSize := int64(db.pageSize) - if (sz % pageSize) != 0 { - sz = ((sz / pageSize) + 1) * pageSize - } - - // If we've exceeded the max size then only grow up to the max size. - if sz > maxMapSize { - sz = maxMapSize - } - - return int(sz), nil -} - -// init creates a new database file and initializes its meta pages. -func (db *DB) init() error { - // Set the page size to the OS page size. - db.pageSize = os.Getpagesize() - - // Create two meta pages on a buffer. - buf := make([]byte, db.pageSize*4) - for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf[:], pgid(i)) - p.id = pgid(i) - p.flags = metaPageFlag - - // Initialize the meta page. - m := p.meta() - m.magic = magic - m.version = version - m.pageSize = uint32(db.pageSize) - m.freelist = 2 - m.root = bucket{root: 3} - m.pgid = 4 - m.txid = txid(i) - m.checksum = m.sum64() - } - - // Write an empty freelist at page 3. - p := db.pageInBuffer(buf[:], pgid(2)) - p.id = pgid(2) - p.flags = freelistPageFlag - p.count = 0 - - // Write an empty leaf page at page 4.
- p = db.pageInBuffer(buf[:], pgid(3)) - p.id = pgid(3) - p.flags = leafPageFlag - p.count = 0 - - // Write the buffer to our data file. - if _, err := db.ops.writeAt(buf, 0); err != nil { - return err - } - if err := fdatasync(db); err != nil { - return err - } - - return nil -} - -// Close releases all database resources. -// All transactions must be closed before closing the database. -func (db *DB) Close() error { - db.rwlock.Lock() - defer db.rwlock.Unlock() - - db.metalock.Lock() - defer db.metalock.Unlock() - - db.mmaplock.RLock() - defer db.mmaplock.RUnlock() - - return db.close() -} - -func (db *DB) close() error { - if !db.opened { - return nil - } - - db.opened = false - - db.freelist = nil - - // Clear ops. - db.ops.writeAt = nil - - // Close the mmap. - if err := db.munmap(); err != nil { - return err - } - - // Close file handles. - if db.file != nil { - // No need to unlock read-only file. - if !db.readOnly { - // Unlock the file. - if err := funlock(db); err != nil { - log.Printf("bolt.Close(): funlock error: %s", err) - } - } - - // Close the file descriptor. - if err := db.file.Close(); err != nil { - return fmt.Errorf("db file close: %s", err) - } - db.file = nil - } - - db.path = "" - return nil -} - -// Begin starts a new transaction. -// Multiple read-only transactions can be used concurrently but only one -// write transaction can be used at a time. Starting multiple write transactions -// will cause the calls to block and be serialized until the current write -// transaction finishes. -// -// Transactions should not be dependent on one another. Opening a read -// transaction and a write transaction in the same goroutine can cause the -// writer to deadlock because the database periodically needs to re-mmap itself -// as it grows and it cannot do that while a read transaction is open. -// -// If a long running read transaction (for example, a snapshot transaction) is -// needed, you might want to set DB.InitialMmapSize to a large enough value -// to avoid potential blocking of write transaction. -// -// IMPORTANT: You must close read-only transactions after you are finished or -// else the database will not reclaim old pages. -func (db *DB) Begin(writable bool) (*Tx, error) { - if writable { - return db.beginRWTx() - } - return db.beginTx() -} - -func (db *DB) beginTx() (*Tx, error) { - // Lock the meta pages while we initialize the transaction. We obtain - // the meta lock before the mmap lock because that's the order that the - // write transaction will obtain them. - db.metalock.Lock() - - // Obtain a read-only lock on the mmap. When the mmap is remapped it will - // obtain a write lock so all transactions must finish before it can be - // remapped. - db.mmaplock.RLock() - - // Exit if the database is not open yet. - if !db.opened { - db.mmaplock.RUnlock() - db.metalock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{} - t.init(db) - - // Keep track of transaction until it closes. - db.txs = append(db.txs, t) - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Update the transaction stats. - db.statlock.Lock() - db.stats.TxN++ - db.stats.OpenTxN = n - db.statlock.Unlock() - - return t, nil -} - -func (db *DB) beginRWTx() (*Tx, error) { - // If the database was opened with Options.ReadOnly, return an error. - if db.readOnly { - return nil, ErrDatabaseReadOnly - } - - // Obtain writer lock. This is released by the transaction when it closes. 
- // This enforces only one writer transaction at a time. - db.rwlock.Lock() - - // Once we have the writer lock then we can lock the meta pages so that - // we can set up the transaction. - db.metalock.Lock() - defer db.metalock.Unlock() - - // Exit if the database is not open yet. - if !db.opened { - db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{writable: true} - t.init(db) - db.rwtx = t - - // Free any pages associated with closed read-only transactions. - var minid txid = 0xFFFFFFFFFFFFFFFF - for _, t := range db.txs { - if t.meta.txid < minid { - minid = t.meta.txid - } - } - if minid > 0 { - db.freelist.release(minid - 1) - } - - return t, nil -} - -// removeTx removes a transaction from the database. -func (db *DB) removeTx(tx *Tx) { - // Release the read lock on the mmap. - db.mmaplock.RUnlock() - - // Use the meta lock to restrict access to the DB object. - db.metalock.Lock() - - // Remove the transaction. - for i, t := range db.txs { - if t == tx { - last := len(db.txs) - 1 - db.txs[i] = db.txs[last] - db.txs[last] = nil - db.txs = db.txs[:last] - break - } - } - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Merge statistics. - db.statlock.Lock() - db.stats.OpenTxN = n - db.stats.TxStats.add(&tx.stats) - db.statlock.Unlock() -} - -// Update executes a function within the context of a read-write managed transaction. -// If no error is returned from the function then the transaction is committed. -// If an error is returned then the entire transaction is rolled back. -// Any error that is returned from the function or returned from the commit is -// returned from the Update() method. -// -// Attempting to manually commit or rollback within the function will cause a panic. -func (db *DB) Update(fn func(*Tx) error) error { - t, err := db.Begin(true) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually commit. - t.managed = true - - // If an error is returned from the function then rollback and return error. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - return t.Commit() -} - -// View executes a function within the context of a managed read-only transaction. -// Any error that is returned from the function is returned from the View() method. -// -// Attempting to manually rollback within the function will cause a panic. -func (db *DB) View(fn func(*Tx) error) error { - t, err := db.Begin(false) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually rollback. - t.managed = true - - // If an error is returned from the function then pass it through. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - if err := t.Rollback(); err != nil { - return err - } - - return nil -} - -// Batch calls fn as part of a batch. It behaves similar to Update, -// except: -// -// 1. concurrent Batch calls can be combined into a single Bolt -// transaction. -// -// 2. the function passed to Batch may be called multiple times, -// regardless of whether it returns error or not. 
-// -// This means that Batch function side effects must be idempotent and -// take permanent effect only after a successful return is seen in -// caller. -// -// The maximum batch size and delay can be adjusted with DB.MaxBatchSize -// and DB.MaxBatchDelay, respectively. -// -// Batch is only useful when there are multiple goroutines calling it. -func (db *DB) Batch(fn func(*Tx) error) error { - errCh := make(chan error, 1) - - db.batchMu.Lock() - if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { - // There is no existing batch, or the existing batch is full; start a new one. - db.batch = &batch{ - db: db, - } - db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) - } - db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) - if len(db.batch.calls) >= db.MaxBatchSize { - // wake up batch, it's ready to run - go db.batch.trigger() - } - db.batchMu.Unlock() - - err := <-errCh - if err == trySolo { - err = db.Update(fn) - } - return err -} - -type call struct { - fn func(*Tx) error - err chan<- error -} - -type batch struct { - db *DB - timer *time.Timer - start sync.Once - calls []call -} - -// trigger runs the batch if it hasn't already been run. -func (b *batch) trigger() { - b.start.Do(b.run) -} - -// run performs the transactions in the batch and communicates results -// back to DB.Batch. -func (b *batch) run() { - b.db.batchMu.Lock() - b.timer.Stop() - // Make sure no new work is added to this batch, but don't break - // other batches. - if b.db.batch == b { - b.db.batch = nil - } - b.db.batchMu.Unlock() - -retry: - for len(b.calls) > 0 { - var failIdx = -1 - err := b.db.Update(func(tx *Tx) error { - for i, c := range b.calls { - if err := safelyCall(c.fn, tx); err != nil { - failIdx = i - return err - } - } - return nil - }) - - if failIdx >= 0 { - // take the failing transaction out of the batch. it's - // safe to shorten b.calls here because db.batch no longer - // points to us, and we hold the mutex anyway. - c := b.calls[failIdx] - b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] - // tell the submitter re-run it solo, continue with the rest of the batch - c.err <- trySolo - continue retry - } - - // pass success, or bolt internal errors, to all callers - for _, c := range b.calls { - c.err <- err - } - break retry - } -} - -// trySolo is a special sentinel error value used for signaling that a -// transaction function should be re-run. It should never be seen by -// callers. -var trySolo = errors.New("batch function returned an error and should be re-run solo") - -type panicked struct { - reason interface{} -} - -func (p panicked) Error() string { - if err, ok := p.reason.(error); ok { - return err.Error() - } - return fmt.Sprintf("panic: %v", p.reason) -} - -func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { - defer func() { - if p := recover(); p != nil { - err = panicked{p} - } - }() - return fn(tx) -} - -// Sync executes fdatasync() against the database file handle. -// -// This is not necessary under normal operation, however, if you use NoSync -// then it allows you to force the database file to sync against the disk. -func (db *DB) Sync() error { return fdatasync(db) } - -// Stats retrieves ongoing performance stats for the database. -// This is only updated when a transaction closes. 
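As a point of reference for the managed-transaction helpers documented above, here is a minimal, hedged usage sketch in Go; it is not taken from snapd or from this patch, and the file path, bucket, and keys are hypothetical. Update commits when the function returns nil and rolls back otherwise, View is read-only, and Batch may coalesce and re-run its function, so that function must be idempotent.

package main

import (
	"fmt"
	"log"

	"github.com/snapcore/bolt"
)

func main() {
	// Hypothetical path; Open creates the file if it does not exist.
	db, err := bolt.Open("/tmp/example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Update: a nil return commits the read-write transaction, any error rolls it back.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("settings"))
		if err != nil {
			return err
		}
		return b.Put([]byte("greeting"), []byte("hello"))
	}); err != nil {
		log.Fatal(err)
	}

	// Batch: like Update, but concurrent calls may share one transaction and the
	// function may run more than once, so its effects must be idempotent.
	if err := db.Batch(func(tx *bolt.Tx) error {
		return tx.Bucket([]byte("settings")).Put([]byte("greeting"), []byte("hello"))
	}); err != nil {
		log.Fatal(err)
	}

	// View: read-only; returned values are only valid while the transaction is open.
	if err := db.View(func(tx *bolt.Tx) error {
		fmt.Printf("greeting=%s\n", tx.Bucket([]byte("settings")).Get([]byte("greeting")))
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}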
-func (db *DB) Stats() Stats { - db.statlock.RLock() - defer db.statlock.RUnlock() - return db.stats -} - -// This is for internal access to the raw data bytes from the C cursor, use -// carefully, or not at all. -func (db *DB) Info() *Info { - return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} -} - -// page retrieves a page reference from the mmap based on the current page size. -func (db *DB) page(id pgid) *page { - pos := id * pgid(db.pageSize) - return (*page)(unsafe.Pointer(&db.data[pos])) -} - -// pageInBuffer retrieves a page reference from a given byte array based on the current page size. -func (db *DB) pageInBuffer(b []byte, id pgid) *page { - return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) -} - -// meta retrieves the current meta page reference. -func (db *DB) meta() *meta { - // We have to return the meta with the highest txid which doesn't fail - // validation. Otherwise, we can cause errors when in fact the database is - // in a consistent state. metaA is the one with the higher txid. - metaA := db.meta0 - metaB := db.meta1 - if db.meta1.txid > db.meta0.txid { - metaA = db.meta1 - metaB = db.meta0 - } - - // Use higher meta page if valid. Otherwise fallback to previous, if valid. - if err := metaA.validate(); err == nil { - return metaA - } else if err := metaB.validate(); err == nil { - return metaB - } - - // This should never be reached, because both meta1 and meta0 were validated - // on mmap() and we do fsync() on every write. - panic("bolt.DB.meta(): invalid meta pages") -} - -// allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(count int) (*page, error) { - // Allocate a temporary buffer for the page. - var buf []byte - if count == 1 { - buf = db.pagePool.Get().([]byte) - } else { - buf = make([]byte, count*db.pageSize) - } - p := (*page)(unsafe.Pointer(&buf[0])) - p.overflow = uint32(count - 1) - - // Use pages from the freelist if they are available. - if p.id = db.freelist.allocate(count); p.id != 0 { - return p, nil - } - - // Resize mmap() if we're at the end. - p.id = db.rwtx.meta.pgid - var minsz = int((p.id+pgid(count))+1) * db.pageSize - if minsz >= db.datasz { - if err := db.mmap(minsz); err != nil { - return nil, fmt.Errorf("mmap allocate error: %s", err) - } - } - - // Move the page id high water mark. - db.rwtx.meta.pgid += pgid(count) - - return p, nil -} - -// grow grows the size of the database to the given sz. -func (db *DB) grow(sz int) error { - // Ignore if the new size is less than available file size. - if sz <= db.filesz { - return nil - } - - // If the data is smaller than the alloc size then only allocate what's needed. - // Once it goes over the allocation size then allocate in chunks. - if db.datasz < db.AllocSize { - sz = db.datasz - } else { - sz += db.AllocSize - } - - // Truncate and fsync to ensure file size metadata is flushed. - // https://github.com/boltdb/bolt/issues/284 - if !db.NoGrowSync && !db.readOnly { - if runtime.GOOS != "windows" { - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("file resize error: %s", err) - } - } - if err := db.file.Sync(); err != nil { - return fmt.Errorf("file sync error: %s", err) - } - } - - db.filesz = sz - return nil -} - -func (db *DB) IsReadOnly() bool { - return db.readOnly -} - -// Options represents the options that can be set when opening a database. -type Options struct { - // Timeout is the amount of time to wait to obtain a file lock. - // When set to zero it will wait indefinitely. 
This option is only - // available on Darwin and Linux. - Timeout time.Duration - - // Sets the DB.NoGrowSync flag before memory mapping the file. - NoGrowSync bool - - // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to - // grab a shared lock (UNIX). - ReadOnly bool - - // Sets the DB.MmapFlags flag before memory mapping the file. - MmapFlags int - - // InitialMmapSize is the initial mmap size of the database - // in bytes. Read transactions won't block write transaction - // if the InitialMmapSize is large enough to hold database mmap - // size. (See DB.Begin for more information) - // - // If <=0, the initial map size is 0. - // If initialMmapSize is smaller than the previous database size, - // it takes no effect. - InitialMmapSize int -} - -// DefaultOptions represent the options used if nil options are passed into Open(). -// No timeout is used which will cause Bolt to wait indefinitely for a lock. -var DefaultOptions = &Options{ - Timeout: 0, - NoGrowSync: false, -} - -// Stats represents statistics about the database. -type Stats struct { - // Freelist stats - FreePageN int // total number of free pages on the freelist - PendingPageN int // total number of pending pages on the freelist - FreeAlloc int // total bytes allocated in free pages - FreelistInuse int // total bytes used by the freelist - - // Transaction stats - TxN int // total number of started read transactions - OpenTxN int // number of currently open read transactions - - TxStats TxStats // global, ongoing stats. -} - -// Sub calculates and returns the difference between two sets of database stats. -// This is useful when obtaining stats at two different points and time and -// you need the performance counters that occurred within that time span. -func (s *Stats) Sub(other *Stats) Stats { - if other == nil { - return *s - } - var diff Stats - diff.FreePageN = s.FreePageN - diff.PendingPageN = s.PendingPageN - diff.FreeAlloc = s.FreeAlloc - diff.FreelistInuse = s.FreelistInuse - diff.TxN = s.TxN - other.TxN - diff.TxStats = s.TxStats.Sub(&other.TxStats) - return diff -} - -func (s *Stats) add(other *Stats) { - s.TxStats.add(&other.TxStats) -} - -type Info struct { - Data uintptr - PageSize int -} - -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// validate checks the marker bytes and version of the meta page to ensure it matches this binary. -func (m *meta) validate() error { - if m.magic != magic { - return ErrInvalid - } else if m.version != version { - return ErrVersionMismatch - } else if m.checksum != 0 && m.checksum != m.sum64() { - return ErrChecksum - } - return nil -} - -// copy copies one meta object to another. -func (m *meta) copy(dest *meta) { - *dest = *m -} - -// write writes the meta onto a page. -func (m *meta) write(p *page) { - if m.root.root >= m.pgid { - panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid { - panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) - } - - // Page id is either going to be 0 or 1 which we can determine by the transaction ID. - p.id = pgid(m.txid % 2) - p.flags |= metaPageFlag - - // Calculate the checksum. - m.checksum = m.sum64() - - m.copy(p.meta()) -} - -// generates the checksum for the meta. 
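Similarly, a hedged sketch of Open with explicit Options and of diffing Stats snapshots; the path and timeout are hypothetical and the database file is assumed to already exist, since ReadOnly mode cannot initialize a new one. Timeout bounds how long Open waits for the file lock, ReadOnly takes a shared rather than exclusive flock, and Stats.Sub returns the counters accumulated between two snapshots.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/snapcore/bolt"
)

func main() {
	// Wait at most five seconds for the flock instead of blocking indefinitely,
	// and open the (pre-existing) file with a shared, read-only lock.
	db, err := bolt.Open("/var/lib/example/state.db", 0600, &bolt.Options{
		Timeout:  5 * time.Second,
		ReadOnly: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	before := db.Stats()

	// Any read work would go here; even an empty View counts as a read transaction.
	if err := db.View(func(tx *bolt.Tx) error { return nil }); err != nil {
		log.Fatal(err)
	}

	after := db.Stats()
	diff := after.Sub(&before)
	fmt.Printf("read transactions since snapshot: %d\n", diff.TxN)
}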
-func (m *meta) sum64() uint64 { - var h = fnv.New64a() - _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) - return h.Sum64() -} - -// _assert will panic with a given formatted message if the given condition is false. -func _assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } - -func printstack() { - stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") - fmt.Fprintln(os.Stderr, stack) -} diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/doc.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/doc.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/doc.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/doc.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -/* -Package bolt implements a low-level key/value store in pure Go. It supports -fully serializable transactions, ACID semantics, and lock-free MVCC with -multiple readers and a single writer. Bolt can be used for projects that -want a simple data store without the need to add large dependencies such as -Postgres or MySQL. - -Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is -optimized for fast read access and does not require recovery in the event of a -system crash. Transactions which have not finished committing will simply be -rolled back in the event of a crash. - -The design of Bolt is based on Howard Chu's LMDB database project. - -Bolt currently works on Windows, Mac OS X, and Linux. - - -Basics - -There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is -a collection of buckets and is represented by a single file on disk. A bucket is -a collection of unique keys that are associated with values. - -Transactions provide either read-only or read-write access to the database. -Read-only transactions can retrieve key/value pairs and can use Cursors to -iterate over the dataset sequentially. Read-write transactions can create and -delete buckets and can insert and remove keys. Only one read-write transaction -is allowed at a time. - - -Caveats - -The database uses a read-only, memory-mapped data file to ensure that -applications cannot corrupt the database, however, this means that keys and -values returned from Bolt cannot be changed. Writing to a read-only byte slice -will cause Go to panic. - -Keys and values retrieved from the database are only valid for the life of -the transaction. When used outside the transaction, these byte slices can -point to different data or can point to invalid memory which will cause a panic. - - -*/ -package bolt diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/errors.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/errors.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/errors.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/errors.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,71 +0,0 @@ -package bolt - -import "errors" - -// These errors can be returned when opening or calling methods on a DB. -var ( - // ErrDatabaseNotOpen is returned when a DB instance is accessed before it - // is opened or after it is closed. - ErrDatabaseNotOpen = errors.New("database not open") - - // ErrDatabaseOpen is returned when opening a database that is - // already open. 
- ErrDatabaseOpen = errors.New("database already open") - - // ErrInvalid is returned when both meta pages on a database are invalid. - // This typically occurs when a file is not a bolt database. - ErrInvalid = errors.New("invalid database") - - // ErrVersionMismatch is returned when the data file was created with a - // different version of Bolt. - ErrVersionMismatch = errors.New("version mismatch") - - // ErrChecksum is returned when either meta page checksum does not match. - ErrChecksum = errors.New("checksum error") - - // ErrTimeout is returned when a database cannot obtain an exclusive lock - // on the data file after the timeout passed to Open(). - ErrTimeout = errors.New("timeout") -) - -// These errors can occur when beginning or committing a Tx. -var ( - // ErrTxNotWritable is returned when performing a write operation on a - // read-only transaction. - ErrTxNotWritable = errors.New("tx not writable") - - // ErrTxClosed is returned when committing or rolling back a transaction - // that has already been committed or rolled back. - ErrTxClosed = errors.New("tx closed") - - // ErrDatabaseReadOnly is returned when a mutating transaction is started on a - // read-only database. - ErrDatabaseReadOnly = errors.New("database is in read-only mode") -) - -// These errors can occur when putting or deleting a value or a bucket. -var ( - // ErrBucketNotFound is returned when trying to access a bucket that has - // not been created yet. - ErrBucketNotFound = errors.New("bucket not found") - - // ErrBucketExists is returned when creating a bucket that already exists. - ErrBucketExists = errors.New("bucket already exists") - - // ErrBucketNameRequired is returned when creating a bucket with a blank name. - ErrBucketNameRequired = errors.New("bucket name required") - - // ErrKeyRequired is returned when inserting a zero-length key. - ErrKeyRequired = errors.New("key required") - - // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. - ErrKeyTooLarge = errors.New("key too large") - - // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. - ErrValueTooLarge = errors.New("value too large") - - // ErrIncompatibleValue is returned when trying create or delete a bucket - // on an existing non-bucket key or when trying to create or delete a - // non-bucket key on an existing bucket key. - ErrIncompatibleValue = errors.New("incompatible value") -) diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/freelist.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/freelist.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/freelist.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/freelist.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,252 +0,0 @@ -package bolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - ids []pgid // all free and available free page ids. - pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist() *freelist { - return &freelist{ - pending: make(map[txid][]pgid), - cache: make(map[pgid]bool), - } -} - -// size returns the size of the page after serialization. 
-func (f *freelist) size() int { - n := f.count() - if n >= 0xFFFF { - // The first element will be used to store the count. See freelist.write. - n++ - } - return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// free_count returns count of free pages -func (f *freelist) free_count() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, list := range f.pending { - count += len(list) - } - return count -} - -// copyall copies into dst a list of all free ids and all pending ids in one sorted list. -// f.count returns the minimum length required for dst. -func (f *freelist) copyall(dst []pgid) { - m := make(pgids, 0, f.pending_count()) - for _, list := range f.pending { - m = append(m, list...) - } - sort.Sort(m) - mergepgids(dst, f.ids, m) -} - -// allocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) allocate(n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur. -func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - var ids = f.pending[txid] - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if f.cache[id] { - panic(fmt.Sprintf("page %d already freed", id)) - } - - // Add to the freelist and cache. - ids = append(ids, id) - f.cache[id] = true - } - f.pending[txid] = ids -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, ids := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, ids...) - delete(f.pending, tid) - } - } - sort.Sort(m) - f.ids = pgids(f.ids).merge(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - for _, id := range f.pending[txid] { - delete(f.cache, id) - } - - // Remove pages from pending list. - delete(f.pending, txid) -} - -// freed returns whether a given page is in the free list. 
-func (f *freelist) freed(pgid pgid) bool { - return f.cache[pgid] -} - -// read initializes the freelist from a freelist page. -func (f *freelist) read(p *page) { - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. - idx, count := 0, int(p.count) - if count == 0xFFFF { - idx = 1 - count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) - } - - // Copy the list of page ids from the freelist. - if count == 0 { - f.ids = nil - } else { - ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] - f.ids = make([]pgid, len(ids)) - copy(f.ids, ids) - - // Make sure they're sorted. - sort.Sort(pgids(f.ids)) - } - - // Rebuild the page cache. - f.reindex() -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - - // Update the header flag. - p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - lenids := f.count() - if lenids == 0 { - p.count = uint16(lenids) - } else if lenids < 0xFFFF { - p.count = uint16(lenids) - f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:]) - } else { - p.count = 0xFFFF - ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) - f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range f.ids { - if !pcache[id] { - a = append(a, id) - } - } - f.ids = a - - // Once the available list is rebuilt then rebuild the free cache so that - // it includes the available and pending free pages. - f.reindex() -} - -// reindex rebuilds the free cache based on available and pending free lists. -func (f *freelist) reindex() { - f.cache = make(map[pgid]bool, len(f.ids)) - for _, id := range f.ids { - f.cache[id] = true - } - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - f.cache[pendingID] = true - } - } -} diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/node.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/node.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/node.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/node.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,604 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "sort" - "unsafe" -) - -// node represents an in-memory, deserialized page. -type node struct { - bucket *Bucket - isLeaf bool - unbalanced bool - spilled bool - key []byte - pgid pgid - parent *node - children nodes - inodes inodes -} - -// root returns the top-level node this node is attached to. -func (n *node) root() *node { - if n.parent == nil { - return n - } - return n.parent.root() -} - -// minKeys returns the minimum number of inodes this node should have. 
-func (n *node) minKeys() int { - if n.isLeaf { - return 1 - } - return 2 -} - -// size returns the size of the node after serialization. -func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) - } - return sz -} - -// sizeLessThan returns true if the node is less than a given size. -// This is an optimization to avoid calculating a large node when we only need -// to know if it fits inside a certain page size. -func (n *node) sizeLessThan(v int) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) - if sz >= v { - return false - } - } - return true -} - -// pageElementSize returns the size of each page element based on the type of node. -func (n *node) pageElementSize() int { - if n.isLeaf { - return leafPageElementSize - } - return branchPageElementSize -} - -// childAt returns the child node at a given index. -func (n *node) childAt(index int) *node { - if n.isLeaf { - panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) - } - return n.bucket.node(n.inodes[index].pgid, n) -} - -// childIndex returns the index of a given child node. -func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) - return index -} - -// numChildren returns the number of children. -func (n *node) numChildren() int { - return len(n.inodes) -} - -// nextSibling returns the next node with the same parent. -func (n *node) nextSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index >= n.parent.numChildren()-1 { - return nil - } - return n.parent.childAt(index + 1) -} - -// prevSibling returns the previous node with the same parent. -func (n *node) prevSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index == 0 { - return nil - } - return n.parent.childAt(index - 1) -} - -// put inserts a key/value. -func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { - if pgid >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) - } else if len(oldKey) <= 0 { - panic("put: zero-length old key") - } else if len(newKey) <= 0 { - panic("put: zero-length new key") - } - - // Find insertion index. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) - - // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) - if !exact { - n.inodes = append(n.inodes, inode{}) - copy(n.inodes[index+1:], n.inodes[index:]) - } - - inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgid - _assert(len(inode.key) > 0, "put: zero-length inode key") -} - -// del removes a key from the node. -func (n *node) del(key []byte) { - // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) - - // Exit if the key isn't found. - if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { - return - } - - // Delete inode from the node. - n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) 
- - // Mark the node as needing rebalancing. - n.unbalanced = true -} - -// read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } - - // Save first key so we can find the node in the parent when we spill. - if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") - } else { - n.key = nil - } -} - -// write writes the items onto one or more pages. -func (n *node) write(p *page) { - // Initialize page. - if n.isLeaf { - p.flags |= leafPageFlag - } else { - p.flags |= branchPageFlag - } - - if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) - } - p.count = uint16(len(n.inodes)) - - // Stop here if there are no items to write. - if p.count == 0 { - return - } - - // Loop over each item and write it to the page. - b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] - for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") - - // Write the page element. - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) - } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") - } - - // If the length of key+value is larger than the max allocation size - // then we need to reallocate the byte array pointer. - // - // See: https://github.com/boltdb/bolt/pull/335 - klen, vlen := len(item.key), len(item.value) - if len(b) < klen+vlen { - b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] - } - - // Write data for the element to the end of the page. - copy(b[0:], item.key) - b = b[klen:] - copy(b[0:], item.value) - b = b[vlen:] - } - - // DEBUG ONLY: n.dump() -} - -// split breaks up a node into multiple smaller nodes, if appropriate. -// This should only be called from the spill() function. -func (n *node) split(pageSize int) []*node { - var nodes []*node - - node := n - for { - // Split node into two. - a, b := node.splitTwo(pageSize) - nodes = append(nodes, a) - - // If we can't split then exit the loop. - if b == nil { - break - } - - // Set node to b so it gets split on the next iteration. - node = b - } - - return nodes -} - -// splitTwo breaks up a node into two smaller nodes, if appropriate. -// This should only be called from the split() function. -func (n *node) splitTwo(pageSize int) (*node, *node) { - // Ignore the split if the page doesn't have at least enough nodes for - // two pages or if the nodes can fit in a single page. - if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { - return n, nil - } - - // Determine the threshold before starting a new node. 
- var fillPercent = n.bucket.FillPercent - if fillPercent < minFillPercent { - fillPercent = minFillPercent - } else if fillPercent > maxFillPercent { - fillPercent = maxFillPercent - } - threshold := int(float64(pageSize) * fillPercent) - - // Determine split position and sizes of the two pages. - splitIndex, _ := n.splitIndex(threshold) - - // Split node into two separate nodes. - // If there's no parent then we'll need to create one. - if n.parent == nil { - n.parent = &node{bucket: n.bucket, children: []*node{n}} - } - - // Create a new node and add it to the parent. - next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} - n.parent.children = append(n.parent.children, next) - - // Split inodes across two nodes. - next.inodes = n.inodes[splitIndex:] - n.inodes = n.inodes[:splitIndex] - - // Update the statistics. - n.bucket.tx.stats.Split++ - - return n, next -} - -// splitIndex finds the position where a page will fill a given threshold. -// It returns the index as well as the size of the first page. -// This is only be called from split(). -func (n *node) splitIndex(threshold int) (index, sz int) { - sz = pageHeaderSize - - // Loop until we only have the minimum number of keys required for the second page. - for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { - index = i - inode := n.inodes[i] - elsize := n.pageElementSize() + len(inode.key) + len(inode.value) - - // If we have at least the minimum number of keys and adding another - // node would put us over the threshold then exit and return. - if i >= minKeysPerPage && sz+elsize > threshold { - break - } - - // Add the element size to the total size. - sz += elsize - } - - return -} - -// spill writes the nodes to dirty pages and splits nodes as it goes. -// Returns an error if dirty pages cannot be allocated. -func (n *node) spill() error { - var tx = n.bucket.tx - if n.spilled { - return nil - } - - // Spill child nodes first. Child nodes can materialize sibling nodes in - // the case of split-merge so we cannot use a range loop. We have to check - // the children size on every loop iteration. - sort.Sort(n.children) - for i := 0; i < len(n.children); i++ { - if err := n.children[i].spill(); err != nil { - return err - } - } - - // We no longer need the child list because it's only used for spill tracking. - n.children = nil - - // Split nodes into appropriate sizes. The first node will always be n. - var nodes = n.split(tx.db.pageSize) - for _, node := range nodes { - // Add node's page to the freelist if it's not new. - if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) - node.pgid = 0 - } - - // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) - if err != nil { - return err - } - - // Write the node. - if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) - } - node.pgid = p.id - node.write(p) - node.spilled = true - - // Insert into parent inodes. - if node.parent != nil { - var key = node.key - if key == nil { - key = node.inodes[0].key - } - - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") - } - - // Update the statistics. - tx.stats.Spill++ - } - - // If the root node split and created a new root then we need to spill that - // as well. We'll clear out the children to make sure it doesn't try to respill. 
- if n.parent != nil && n.parent.pgid == 0 { - n.children = nil - return n.parent.spill() - } - - return nil -} - -// rebalance attempts to combine the node with sibling nodes if the node fill -// size is below a threshold or if there are not enough keys. -func (n *node) rebalance() { - if !n.unbalanced { - return - } - n.unbalanced = false - - // Update statistics. - n.bucket.tx.stats.Rebalance++ - - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 - if n.size() > threshold && len(n.inodes) > n.minKeys() { - return - } - - // Root node has special handling. - if n.parent == nil { - // If root node is a branch and only has one node then collapse it. - if !n.isLeaf && len(n.inodes) == 1 { - // Move root's child up. - child := n.bucket.node(n.inodes[0].pgid, n) - n.isLeaf = child.isLeaf - n.inodes = child.inodes[:] - n.children = child.children - - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent = n - } - } - - // Remove old child. - child.parent = nil - delete(n.bucket.nodes, child.pgid) - child.free() - } - - return - } - - // If node has no keys then just remove it. - if n.numChildren() == 0 { - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - n.parent.rebalance() - return - } - - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - - // Destination node is right sibling if idx == 0, otherwise left sibling. - var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) - if useNextSibling { - target = n.nextSibling() - } else { - target = n.prevSibling() - } - - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. - for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) - n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - } - - // Either this node or the target node was deleted from the parent so rebalance it. - n.parent.rebalance() -} - -// removes a node from the list of in-memory children. -// This does not affect the inodes. -func (n *node) removeChild(target *node) { - for i, child := range n.children { - if child == target { - n.children = append(n.children[:i], n.children[i+1:]...) - return - } - } -} - -// dereference causes the node to copy all its inode key/value references to heap memory. -// This is required when the mmap is reallocated so inodes are not pointing to stale data. 
-func (n *node) dereference() { - if n.key != nil { - key := make([]byte, len(n.key)) - copy(key, n.key) - n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") - } - - for i := range n.inodes { - inode := &n.inodes[i] - - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") - - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value - } - - // Recursively dereference children. - for _, child := range n.children { - child.dereference() - } - - // Update statistics. - n.bucket.tx.stats.NodeDeref++ -} - -// free adds the node's underlying page to the freelist. -func (n *node) free() { - if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) - n.pgid = 0 - } -} - -// dump writes the contents of the node to STDERR for debugging purposes. -/* -func (n *node) dump() { - // Write node header. - var typ = "branch" - if n.isLeaf { - typ = "leaf" - } - warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) - - // Write out abbreviated version of each item. - for _, item := range n.inodes { - if n.isLeaf { - if item.flags&bucketLeafFlag != 0 { - bucket := (*bucket)(unsafe.Pointer(&item.value[0])) - warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) - } else { - warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) - } - } else { - warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) - } - } - warn("") -} -*/ - -type nodes []*node - -func (s nodes) Len() int { return len(s) } -func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. -type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/page.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/page.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/page.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/page.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,197 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) - -const minKeysPerPage = 2 - -const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) -const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 - ptr uintptr -} - -// typ returns a human readable page type string used for debugging. -func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. 
-func (p *page) meta() *meta { - return (*meta)(unsafe.Pointer(&p.ptr)) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] - return n -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - if p.count == 0 { - return nil - } - return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - if p.count == 0 { - return nil - } - return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] -} - -// dump writes n bytes of the page to STDERR as hex output. -func (p *page) hexdump(n int) { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. -func (n *branchPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. -func (n *leafPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] -} - -// value returns a byte slice of the node value. -func (n *leafPageElement) value() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] -} - -// PageInfo represents human readable information about a page. -type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } - merged := make(pgids, len(a)+len(b)) - mergepgids(merged, a, b) - return merged -} - -// mergepgids copies the sorted union of a and b into dst. -// If dst is too small, it panics. -func mergepgids(dst, a, b pgids) { - if len(dst) < len(a)+len(b) { - panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) - } - // Copy in the opposite slice if one is nil. - if len(a) == 0 { - copy(dst, b) - return - } - if len(b) == 0 { - copy(dst, a) - return - } - - // Merged will hold all elements from both lists. - merged := dst[:0] - - // Assign lead to the slice with a lower starting value, follow to the higher value. 
- lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. - for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - _ = append(merged, follow...) -} diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/bolt/tx.go snapd-2.63+23.10/vendor/github.com/snapcore/bolt/tx.go --- snapd-2.62+23.10/vendor/github.com/snapcore/bolt/tx.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/bolt/tx.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,684 +0,0 @@ -package bolt - -import ( - "fmt" - "io" - "os" - "sort" - "strings" - "time" - "unsafe" -) - -// txid represents the internal transaction identifier. -type txid uint64 - -// Tx represents a read-only or read/write transaction on the database. -// Read-only transactions can be used for retrieving values for keys and creating cursors. -// Read/write transactions can create and remove buckets and create and remove keys. -// -// IMPORTANT: You must commit or rollback transactions when you are done with -// them. Pages can not be reclaimed by the writer until no more transactions -// are using them. A long running read transaction can cause the database to -// quickly grow. -type Tx struct { - writable bool - managed bool - db *DB - meta *meta - root Bucket - pages map[pgid]*page - stats TxStats - commitHandlers []func() - - // WriteFlag specifies the flag for write-related methods like WriteTo(). - // Tx opens the database file with the specified flag to copy the data. - // - // By default, the flag is unset, which works well for mostly in-memory - // workloads. For databases that are much larger than available RAM, - // set the flag to syscall.O_DIRECT to avoid trashing the page cache. - WriteFlag int -} - -// init initializes the transaction. -func (tx *Tx) init(db *DB) { - tx.db = db - tx.pages = nil - - // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) - - // Copy over the root bucket. - tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root - - // Increment the transaction id and add a page cache for writable transactions. - if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) - } -} - -// ID returns the transaction id. -func (tx *Tx) ID() int { - return int(tx.meta.txid) -} - -// DB returns a reference to the database that created the transaction. -func (tx *Tx) DB() *DB { - return tx.db -} - -// Size returns current database size in bytes as seen by this transaction. -func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) -} - -// Writable returns whether the transaction can perform write operations. -func (tx *Tx) Writable() bool { - return tx.writable -} - -// Cursor creates a cursor associated with the root bucket. -// All items in the cursor will return a nil value because all root bucket keys point to buckets. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (tx *Tx) Cursor() *Cursor { - return tx.root.Cursor() -} - -// Stats retrieves a copy of the current transaction statistics. 
-func (tx *Tx) Stats() TxStats { - return tx.stats -} - -// Bucket retrieves a bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) Bucket(name []byte) *Bucket { - return tx.root.Bucket(name) -} - -// CreateBucket creates a new bucket. -// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { - return tx.root.CreateBucket(name) -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { - return tx.root.CreateBucketIfNotExists(name) -} - -// DeleteBucket deletes a bucket. -// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. -func (tx *Tx) DeleteBucket(name []byte) error { - return tx.root.DeleteBucket(name) -} - -// ForEach executes a function for each bucket in the root. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. -func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { - return tx.root.ForEach(func(k, v []byte) error { - if err := fn(k, tx.root.Bucket(k)); err != nil { - return err - } - return nil - }) -} - -// OnCommit adds a handler function to be executed after the transaction successfully commits. -func (tx *Tx) OnCommit(fn func()) { - tx.commitHandlers = append(tx.commitHandlers, fn) -} - -// Commit writes all changes to disk and updates the meta page. -// Returns an error if a disk write error occurs, or if Commit is -// called on a read-only transaction. -func (tx *Tx) Commit() error { - _assert(!tx.managed, "managed tx commit not allowed") - if tx.db == nil { - return ErrTxClosed - } else if !tx.writable { - return ErrTxNotWritable - } - - // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. - - // Rebalance nodes which have had deletions. - var startTime = time.Now() - tx.root.rebalance() - if tx.stats.Rebalance > 0 { - tx.stats.RebalanceTime += time.Since(startTime) - } - - // spill data onto dirty pages. - startTime = time.Now() - if err := tx.root.spill(); err != nil { - tx.rollback() - return err - } - tx.stats.SpillTime += time.Since(startTime) - - // Free the old root bucket. - tx.meta.root.root = tx.root.root - - opgid := tx.meta.pgid - - // Free the freelist and allocate new pages for it. This will overestimate - // the size of the freelist but not underestimate the size (which would be bad). - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) - if err != nil { - tx.rollback() - return err - } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id - - // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { - tx.rollback() - return err - } - } - - // Write dirty pages to disk. - startTime = time.Now() - if err := tx.write(); err != nil { - tx.rollback() - return err - } - - // If strict mode is enabled then perform a consistency check. 
- // Only the first consistency error is reported in the panic. - if tx.db.StrictMode { - ch := tx.Check() - var errs []string - for { - err, ok := <-ch - if !ok { - break - } - errs = append(errs, err.Error()) - } - if len(errs) > 0 { - panic("check fail: " + strings.Join(errs, "\n")) - } - } - - // Write meta to disk. - if err := tx.writeMeta(); err != nil { - tx.rollback() - return err - } - tx.stats.WriteTime += time.Since(startTime) - - // Finalize the transaction. - tx.close() - - // Execute commit handlers now that the locks have been removed. - for _, fn := range tx.commitHandlers { - fn() - } - - return nil -} - -// Rollback closes the transaction and ignores all previous updates. Read-only -// transactions must be rolled back and not committed. -func (tx *Tx) Rollback() error { - _assert(!tx.managed, "managed tx rollback not allowed") - if tx.db == nil { - return ErrTxClosed - } - tx.rollback() - return nil -} - -func (tx *Tx) rollback() { - if tx.db == nil { - return - } - if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) - tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) - } - tx.close() -} - -func (tx *Tx) close() { - if tx.db == nil { - return - } - if tx.writable { - // Grab freelist stats. - var freelistFreeN = tx.db.freelist.free_count() - var freelistPendingN = tx.db.freelist.pending_count() - var freelistAlloc = tx.db.freelist.size() - - // Remove transaction ref & writer lock. - tx.db.rwtx = nil - tx.db.rwlock.Unlock() - - // Merge statistics. - tx.db.statlock.Lock() - tx.db.stats.FreePageN = freelistFreeN - tx.db.stats.PendingPageN = freelistPendingN - tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize - tx.db.stats.FreelistInuse = freelistAlloc - tx.db.stats.TxStats.add(&tx.stats) - tx.db.statlock.Unlock() - } else { - tx.db.removeTx(tx) - } - - // Clear all references. - tx.db = nil - tx.meta = nil - tx.root = Bucket{tx: tx} - tx.pages = nil -} - -// Copy writes the entire database to a writer. -// This function exists for backwards compatibility. Use WriteTo() instead. -func (tx *Tx) Copy(w io.Writer) error { - _, err := tx.WriteTo(w) - return err -} - -// WriteTo writes the entire database to a writer. -// If err == nil then exactly tx.Size() bytes will be written into the writer. -func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { - // Attempt to open reader with WriteFlag - f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) - if err != nil { - return 0, err - } - defer func() { _ = f.Close() }() - - // Generate a meta page. We use the same page data for both meta pages. - buf := make([]byte, tx.db.pageSize) - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = metaPageFlag - *page.meta() = *tx.meta - - // Write meta 0. - page.id = 0 - page.meta().checksum = page.meta().sum64() - nn, err := w.Write(buf) - n += int64(nn) - if err != nil { - return n, fmt.Errorf("meta 0 copy: %s", err) - } - - // Write meta 1 with a lower transaction id. - page.id = 1 - page.meta().txid -= 1 - page.meta().checksum = page.meta().sum64() - nn, err = w.Write(buf) - n += int64(nn) - if err != nil { - return n, fmt.Errorf("meta 1 copy: %s", err) - } - - // Move past the meta pages in the file. - if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { - return n, fmt.Errorf("seek: %s", err) - } - - // Copy data pages. 
- wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) - n += wn - if err != nil { - return n, err - } - - return n, f.Close() -} - -// CopyFile copies the entire database to file at the given path. -// A reader transaction is maintained during the copy so it is safe to continue -// using the database while a copy is in progress. -func (tx *Tx) CopyFile(path string, mode os.FileMode) error { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) - if err != nil { - return err - } - - err = tx.Copy(f) - if err != nil { - _ = f.Close() - return err - } - return f.Close() -} - -// Check performs several consistency checks on the database for this transaction. -// An error is returned if any inconsistency is found. -// -// It can be safely run concurrently on a writable transaction. However, this -// incurs a high cost for large databases and databases with a lot of subbuckets -// because of caching. This overhead can be removed if running on a read-only -// transaction, however, it is not safe to execute other writer transactions at -// the same time. -func (tx *Tx) Check() <-chan error { - ch := make(chan error) - go tx.check(ch) - return ch -} - -func (tx *Tx) check(ch chan error) { - // Check if any pages are double freed. - freed := make(map[pgid]bool) - all := make([]pgid, tx.db.freelist.count()) - tx.db.freelist.copyall(all) - for _, id := range all { - if freed[id] { - ch <- fmt.Errorf("page %d: already freed", id) - } - freed[id] = true - } - - // Track every reachable page. - reachable := make(map[pgid]*page) - reachable[0] = tx.page(0) // meta0 - reachable[1] = tx.page(1) // meta1 - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) - } - - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, ch) - - // Ensure all pages below high water mark are either reachable or freed. - for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) - } - } - - // Close the channel to signal completion. - close(ch) -} - -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } - - // Check every page used by this bucket. - b.tx.forEachPage(b.root, 0, func(p *page, _ int) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) - } - - // Ensure each page is only referenced once. - for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references", int(id)) - } - reachable[id] = p - } - - // We should only encounter un-freed leaf and branch pages. - if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) - } - }) - - // Check each bucket within this bucket. - _ = b.ForEach(func(k, v []byte) error { - if child := b.Bucket(k); child != nil { - tx.checkBucket(child, reachable, freed, ch) - } - return nil - }) -} - -// allocate returns a contiguous block of memory starting at a given page. -func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(count) - if err != nil { - return nil, err - } - - // Save to our page cache. 
- tx.pages[p.id] = p - - // Update statistics. - tx.stats.PageCount++ - tx.stats.PageAlloc += count * tx.db.pageSize - - return p, nil -} - -// write writes any dirty pages to disk. -func (tx *Tx) write() error { - // Sort pages by id. - pages := make(pages, 0, len(tx.pages)) - for _, p := range tx.pages { - pages = append(pages, p) - } - // Clear out page cache early. - tx.pages = make(map[pgid]*page) - sort.Sort(pages) - - // Write pages to disk in order. - for _, p := range pages { - size := (int(p.overflow) + 1) * tx.db.pageSize - offset := int64(p.id) * int64(tx.db.pageSize) - - // Write out page in "max allocation" sized chunks. - ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) - for { - // Limit our write to our max allocation size. - sz := size - if sz > maxAllocSize-1 { - sz = maxAllocSize - 1 - } - - // Write chunk to disk. - buf := ptr[:sz] - if _, err := tx.db.ops.writeAt(buf, offset); err != nil { - return err - } - - // Update statistics. - tx.stats.Write++ - - // Exit inner for loop if we've written all the chunks. - size -= sz - if size == 0 { - break - } - - // Otherwise move offset forward and move pointer to next chunk. - offset += int64(sz) - ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) - } - } - - // Ignore file sync if flag is set on DB. - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Put small pages back to page pool. - for _, p := range pages { - // Ignore page sizes over 1 page. - // These are allocated using make() instead of the page pool. - if int(p.overflow) != 0 { - continue - } - - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] - - // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 - for i := range buf { - buf[i] = 0 - } - tx.db.pagePool.Put(buf) - } - - return nil -} - -// writeMeta writes the meta to the disk. -func (tx *Tx) writeMeta() error { - // Create a temporary buffer for the meta page. - buf := make([]byte, tx.db.pageSize) - p := tx.db.pageInBuffer(buf, 0) - tx.meta.write(p) - - // Write the meta page to file. - if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { - return err - } - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Update statistics. - tx.stats.Write++ - - return nil -} - -// page returns a reference to the page with a given id. -// If page has been written to then a temporary buffered page is returned. -func (tx *Tx) page(id pgid) *page { - // Check the dirty pages first. - if tx.pages != nil { - if p, ok := tx.pages[id]; ok { - return p - } - } - - // Otherwise return directly from the mmap. - return tx.db.page(id) -} - -// forEachPage iterates over every page within a given page and executes a function. -func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { - p := tx.page(pgid) - - // Execute function. - fn(p, depth) - - // Recursively loop over children. - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - tx.forEachPage(elem.pgid, depth+1, fn) - } - } -} - -// Page returns page information for a given page number. -// This is only safe for concurrent use when used by a writable transaction. -func (tx *Tx) Page(id int) (*PageInfo, error) { - if tx.db == nil { - return nil, ErrTxClosed - } else if pgid(id) >= tx.meta.pgid { - return nil, nil - } - - // Build the page info. 
- p := tx.db.page(pgid(id)) - info := &PageInfo{ - ID: id, - Count: int(p.count), - OverflowCount: int(p.overflow), - } - - // Determine the type (or if it's free). - if tx.db.freelist.freed(pgid(id)) { - info.Type = "free" - } else { - info.Type = p.typ() - } - - return info, nil -} - -// TxStats represents statistics about the actions performed by the transaction. -type TxStats struct { - // Page statistics. - PageCount int // number of page allocations - PageAlloc int // total bytes allocated - - // Cursor statistics. - CursorCount int // number of cursors created - - // Node statistics - NodeCount int // number of node allocations - NodeDeref int // number of node dereferences - - // Rebalance statistics. - Rebalance int // number of node rebalances - RebalanceTime time.Duration // total time spent rebalancing - - // Split/Spill statistics. - Split int // number of nodes split - Spill int // number of nodes spilled - SpillTime time.Duration // total time spent spilling - - // Write statistics. - Write int // number of writes performed - WriteTime time.Duration // total time spent writing to disk -} - -func (s *TxStats) add(other *TxStats) { - s.PageCount += other.PageCount - s.PageAlloc += other.PageAlloc - s.CursorCount += other.CursorCount - s.NodeCount += other.NodeCount - s.NodeDeref += other.NodeDeref - s.Rebalance += other.Rebalance - s.RebalanceTime += other.RebalanceTime - s.Split += other.Split - s.Spill += other.Spill - s.SpillTime += other.SpillTime - s.Write += other.Write - s.WriteTime += other.WriteTime -} - -// Sub calculates and returns the difference between two sets of transaction stats. -// This is useful when obtaining stats at two different points and time and -// you need the performance counters that occurred within that time span. -func (s *TxStats) Sub(other *TxStats) TxStats { - var diff TxStats - diff.PageCount = s.PageCount - other.PageCount - diff.PageAlloc = s.PageAlloc - other.PageAlloc - diff.CursorCount = s.CursorCount - other.CursorCount - diff.NodeCount = s.NodeCount - other.NodeCount - diff.NodeDeref = s.NodeDeref - other.NodeDeref - diff.Rebalance = s.Rebalance - other.Rebalance - diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime - diff.Split = s.Split - other.Split - diff.Spill = s.Spill - other.Spill - diff.SpillTime = s.SpillTime - other.SpillTime - diff.Write = s.Write - other.Write - diff.WriteTime = s.WriteTime - other.WriteTime - return diff -} diff -Nru snapd-2.62+23.10/vendor/github.com/snapcore/secboot/efi/secureboot_policy.go snapd-2.63+23.10/vendor/github.com/snapcore/secboot/efi/secureboot_policy.go --- snapd-2.62+23.10/vendor/github.com/snapcore/secboot/efi/secureboot_policy.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/github.com/snapcore/secboot/efi/secureboot_policy.go 2024-04-24 00:00:39.000000000 +0000 @@ -1271,7 +1271,13 @@ // XXX: Work around the lack of support for handling SBAT revocations by just generating // a profile for the current values under the MS UEFI CA (minus the latest, which requires // an explicit opt-in via SbatPolicy). 
- for _, level := range [][]byte{[]byte("sbat,1,2021030218\n"), []byte("sbat,1,2022052400\ngrub,2\n")} { + for _, level := range [][]byte{ + []byte("sbat,1,2021030218\n"), + []byte("sbat,1,2022052400\ngrub,2\n"), + []byte("sbat,1,2023012900\nshim,2\ngrub,3\ngrub.debian,4\n"), + []byte("sbat,1,2024010900\nshim,4\ngrub,3\ngrub.debian,4\n"), + []byte("sbat,1,2024040900\nshim,4\ngrub,4\ngrub.peimage,2\n"), + } { profile1 := secboot_tpm2.NewPCRProtectionProfile() if err := gen.run(profile1, sigDbUpdateQuirkModeNone, level); err != nil { return xerrors.Errorf("cannot compute secure boot policy profile: %w", err) diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/.gitignore snapd-2.63+23.10/vendor/go.etcd.io/bbolt/.gitignore --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/.gitignore 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,10 @@ +*.prof +*.test +*.swp +/bin/ +cover.out +cover-*.out +/.idea +*.iml +/cmd/bbolt/bbolt + diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/LICENSE snapd-2.63+23.10/vendor/go.etcd.io/bbolt/LICENSE --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/LICENSE 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/Makefile snapd-2.63+23.10/vendor/go.etcd.io/bbolt/Makefile --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/Makefile 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,63 @@ +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" + +TESTFLAGS_RACE=-race=false +ifdef ENABLE_RACE + TESTFLAGS_RACE=-race=true +endif + +TESTFLAGS_CPU= +ifdef CPU + TESTFLAGS_CPU=-cpu=$(CPU) +endif +TESTFLAGS = $(TESTFLAGS_RACE) $(TESTFLAGS_CPU) $(EXTRA_TESTFLAGS) + +.PHONY: fmt +fmt: + !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') + +.PHONY: lint +lint: + golangci-lint run ./... 
+ +.PHONY: test +test: + @echo "hashmap freelist test" + TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m + TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt + + @echo "array freelist test" + TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m + TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt + +.PHONY: coverage +coverage: + @echo "hashmap freelist test" + TEST_FREELIST_TYPE=hashmap go test -v -timeout 30m \ + -coverprofile cover-freelist-hashmap.out -covermode atomic + + @echo "array freelist test" + TEST_FREELIST_TYPE=array go test -v -timeout 30m \ + -coverprofile cover-freelist-array.out -covermode atomic + +.PHONY: gofail-enable +gofail-enable: install-gofail + gofail enable . + +.PHONY: gofail-disable +gofail-disable: + gofail disable . + +.PHONY: install-gofail +install-gofail: + go install go.etcd.io/gofail + +.PHONY: test-failpoint +test-failpoint: + @echo "[failpoint] hashmap freelist test" + TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint + + @echo "[failpoint] array freelist test" + TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint + diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/README.md snapd-2.63+23.10/vendor/go.etcd.io/bbolt/README.md --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/README.md 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/README.md 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,967 @@ +bbolt +===== + +[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt) +[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt) +[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt) +[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt) +[![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases) +[![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE) + +bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value +store. The purpose of this fork is to provide the Go community with an active +maintenance and development target for Bolt; the goal is improved reliability +and stability. bbolt includes bug fixes, performance enhancements, and features +not found in Bolt while preserving backwards compatibility with the Bolt API. + +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. The goal of the project is to provide a simple, +fast, and reliable database for projects that don't require a full database +server such as Postgres or MySQL. + +Since Bolt is meant to be used as such a low-level piece of functionality, +simplicity is key. The API will be small and only focus on getting values +and setting values. That's it. + +[gh_ben]: https://github.com/benbjohnson +[bolt]: https://github.com/boltdb/bolt +[hyc_symas]: https://twitter.com/hyc_symas +[lmdb]: https://www.symas.com/symas-embedded-database-lmdb + +## Project Status + +Bolt is stable, the API is fixed, and the file format is fixed. 
Full unit +test coverage and randomized black box testing are used to ensure database +consistency and thread safety. Bolt is currently used in high-load production +environments serving databases as large as 1TB. Many companies such as +Shopify and Heroku use Bolt-backed services every day. + +## Project versioning + +bbolt uses [semantic versioning](http://semver.org). +API should not change between patch and minor releases. +New minor versions may add additional features to the API. + +## Table of Contents + + - [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) + - [Resources](#resources) + - [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) + - [Caveats & Limitations](#caveats--limitations) + - [Reading the Source](#reading-the-source) + - [Other Projects Using Bolt](#other-projects-using-bolt) + +## Getting Started + +### Installing + +To start using Bolt, install Go and run `go get`: +```sh +$ go get go.etcd.io/bbolt@latest +``` + +This will retrieve the library and update your `go.mod` and `go.sum` files. + +To run the command line utility, execute: +```sh +$ go run go.etcd.io/bbolt/cmd/bbolt@latest +``` + +Run `go install` to install the `bbolt` command line utility into +your `$GOBIN` path, which defaults to `$GOPATH/bin` or `$HOME/go/bin` if the +`GOPATH` environment variable is not set. +```sh +$ go install go.etcd.io/bbolt/cmd/bbolt@latest +``` + +### Importing bbolt + +To use bbolt as an embedded key-value store, import as: + +```go +import bolt "go.etcd.io/bbolt" + +db, err := bolt.Open(path, 0666, nil) +if err != nil { + return err +} +defer db.Close() +``` + + +### Opening a database + +The top-level object in Bolt is a `DB`. It is represented as a single file on +your disk and represents a consistent snapshot of your data. + +To open your database, simply use the `bolt.Open()` function: + +```go +package main + +import ( + "log" + + bolt "go.etcd.io/bbolt" +) + +func main() { + // Open the my.db data file in your current directory. + // It will be created if it doesn't exist. + db, err := bolt.Open("my.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + defer db.Close() + + ... +} +``` + +Please note that Bolt obtains a file lock on the data file so multiple processes +cannot open the same database at the same time. Opening an already open Bolt +database will cause it to hang until the other process closes it. 
To prevent +an indefinite wait you can pass a timeout option to the `Open()` function: + +```go +db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) +``` + + +### Transactions + +Bolt allows only one read-write transaction at a time but allows as many +read-only transactions as you want at a time. Each transaction has a consistent +view of the data as it existed when the transaction started. + +Individual transactions and all objects created from them (e.g. buckets, keys) +are not thread safe. To work with data in multiple goroutines you must start +a transaction for each one or use locking to ensure only one goroutine accesses +a transaction at a time. Creating transaction from the `DB` is thread safe. + +Transactions should not depend on one another and generally shouldn't be opened +simultaneously in the same goroutine. This can cause a deadlock as the read-write +transaction needs to periodically re-map the data file but it cannot do so while +any read-only transaction is open. Even a nested read-only transaction can cause +a deadlock, as the child transaction can block the parent transaction from releasing +its resources. + +#### Read-write transactions + +To start a read-write transaction, you can use the `DB.Update()` function: + +```go +err := db.Update(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Inside the closure, you have a consistent view of the database. You commit the +transaction by returning `nil` at the end. You can also rollback the transaction +at any point by returning an error. All database operations are allowed inside +a read-write transaction. + +Always check the return error as it will report any disk failures that can cause +your transaction to not complete. If you return an error within your closure +it will be passed through. + + +#### Read-only transactions + +To start a read-only transaction, you can use the `DB.View()` function: + +```go +err := db.View(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +You also get a consistent view of the database within this closure, however, +no mutating operations are allowed within a read-only transaction. You can only +retrieve buckets, retrieve values, and copy the database within a read-only +transaction. + + +#### Batch read-write transactions + +Each `DB.Update()` waits for disk to commit the writes. This overhead +can be minimized by combining multiple updates with the `DB.Batch()` +function: + +```go +err := db.Batch(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Concurrent Batch calls are opportunistically combined into larger +transactions. Batch is only useful when there are multiple goroutines +calling it. + +The trade-off is that `Batch` can call the given +function multiple times, if parts of the transaction fail. The +function must be idempotent and side effects must take effect only +after a successful return from `DB.Batch()`. + +For example: don't display messages from inside the function, instead +set variables in the enclosing scope: + +```go +var id uint64 +err := db.Batch(func(tx *bolt.Tx) error { + // Find last key in bucket, decode as bigendian uint64, increment + // by one, encode back to []byte, and add new key. + ... + id = newValue + return nil +}) +if err != nil { + return ... +} +fmt.Println("Allocated ID %d", id) +``` + + +#### Managing transactions manually + +The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` +function. 
These helper functions will start the transaction, execute a function, +and then safely close your transaction if an error is returned. This is the +recommended way to use Bolt transactions. + +However, sometimes you may want to manually start and end your transactions. +You can use the `DB.Begin()` function directly but **please** be sure to close +the transaction. + +```go +// Start a writable transaction. +tx, err := db.Begin(true) +if err != nil { + return err +} +defer tx.Rollback() + +// Use the transaction... +_, err := tx.CreateBucket([]byte("MyBucket")) +if err != nil { + return err +} + +// Commit the transaction and check for error. +if err := tx.Commit(); err != nil { + return err +} +``` + +The first argument to `DB.Begin()` is a boolean stating if the transaction +should be writable. + + +### Using buckets + +Buckets are collections of key/value pairs within the database. All keys in a +bucket must be unique. You can create a bucket using the `Tx.CreateBucket()` +function: + +```go +db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("MyBucket")) + if err != nil { + return fmt.Errorf("create bucket: %s", err) + } + return nil +}) +``` + +You can also create a bucket only if it doesn't exist by using the +`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this +function for all your top-level buckets after you open your database so you can +guarantee that they exist for future transactions. + +To delete a bucket, simply call the `Tx.DeleteBucket()` function. + + +### Using key/value pairs + +To save a key/value pair to a bucket, use the `Bucket.Put()` function: + +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + err := b.Put([]byte("answer"), []byte("42")) + return err +}) +``` + +This will set the value of the `"answer"` key to `"42"` in the `MyBucket` +bucket. To retrieve this value, we can use the `Bucket.Get()` function: + +```go +db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + v := b.Get([]byte("answer")) + fmt.Printf("The answer is: %s\n", v) + return nil +}) +``` + +The `Get()` function does not return an error because its operation is +guaranteed to work (unless there is some kind of system failure). If the key +exists then it will return its byte slice value. If it doesn't exist then it +will return `nil`. It's important to note that you can have a zero-length value +set to a key which is different than the key not existing. + +Use the `Bucket.Delete()` function to delete a key from the bucket. + +Please note that values returned from `Get()` are only valid while the +transaction is open. If you need to use a value outside of the transaction +then you must use `copy()` to copy it to another byte slice. + + +### Autoincrementing integer for the bucket +By using the `NextSequence()` function, you can let Bolt determine a sequence +which can be used as the unique identifier for your key/value pairs. See the +example below. + +```go +// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. +func (s *Store) CreateUser(u *User) error { + return s.db.Update(func(tx *bolt.Tx) error { + // Retrieve the users bucket. + // This should be created when the DB is first opened. + b := tx.Bucket([]byte("users")) + + // Generate ID for the user. + // This returns an error only if the Tx is closed or not writeable. + // That can't happen in an Update() call so I ignore the error check. 
+ id, _ := b.NextSequence() + u.ID = int(id) + + // Marshal user data into bytes. + buf, err := json.Marshal(u) + if err != nil { + return err + } + + // Persist bytes to users bucket. + return b.Put(itob(u.ID), buf) + }) +} + +// itob returns an 8-byte big endian representation of v. +func itob(v int) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(v)) + return b +} + +type User struct { + ID int + ... +} +``` + +### Iterating over keys + +Bolt stores its keys in byte-sorted order within a bucket. This makes sequential +iteration over these keys extremely fast. To iterate over keys we'll use a +`Cursor`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + c := b.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +The cursor allows you to move to a specific point in the list of keys and move +forward or backward through the keys one at a time. + +The following functions are available on the cursor: + +``` +First() Move to the first key. +Last() Move to the last key. +Seek() Move to a specific key. +Next() Move to the next key. +Prev() Move to the previous key. +``` + +Each of those functions has a return signature of `(key []byte, value []byte)`. +When you have iterated to the end of the cursor then `Next()` will return a +`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` +before calling `Next()` or `Prev()`. If you do not seek to a position then +these functions will return a `nil` key. + +During iteration, if the key is non-`nil` but the value is `nil`, that means +the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to +access the sub-bucket. + + +#### Prefix scans + +To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + c := tx.Bucket([]byte("MyBucket")).Cursor() + + prefix := []byte("1234") + for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +#### Range scans + +Another common use case is scanning over a range such as a time range. If you +use a sortable time encoding such as RFC3339 then you can query a specific +date range like this: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume our events bucket exists and has RFC3339 encoded time keys. + c := tx.Bucket([]byte("Events")).Cursor() + + // Our time range spans the 90's decade. + min := []byte("1990-01-01T00:00:00Z") + max := []byte("2000-01-01T00:00:00Z") + + // Iterate over the 90's. + for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { + fmt.Printf("%s: %s\n", k, v) + } + + return nil +}) +``` + +Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. + + +#### ForEach() + +You can also use the function `ForEach()` if you know you'll be iterating over +all the keys in a bucket: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + b.ForEach(func(k, v []byte) error { + fmt.Printf("key=%s, value=%s\n", k, v) + return nil + }) + return nil +}) +``` + +Please note that keys and values in `ForEach()` are only valid while +the transaction is open. 
If you need to use a key or value outside of +the transaction, you must use `copy()` to copy it to another byte +slice. + +### Nested buckets + +You can also store a bucket in a key to create nested buckets. The API is the +same as the bucket management API on the `DB` object: + +```go +func (*Bucket) CreateBucket(key []byte) (*Bucket, error) +func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) +func (*Bucket) DeleteBucket(key []byte) error +``` + +Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings. + +```go + +// createUser creates a new user in the given account. +func createUser(accountID int, u *User) error { + // Start the transaction. + tx, err := db.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + // Retrieve the root bucket for the account. + // Assume this has already been created when the account was set up. + root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10))) + + // Setup the users bucket. + bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) + if err != nil { + return err + } + + // Generate an ID for the new user. + userID, err := bkt.NextSequence() + if err != nil { + return err + } + u.ID = userID + + // Marshal and save the encoded user. + if buf, err := json.Marshal(u); err != nil { + return err + } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { + return err + } + + // Commit the transaction. + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +``` + + + + +### Database backups + +Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` +function to write a consistent view of the database to a writer. If you call +this from a read-only transaction, it will perform a hot backup and not block +your other database reads and writes. + +By default, it will use a regular file handle which will utilize the operating +system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx) +documentation for information about optimizing for larger-than-RAM datasets. + +One common use case is to backup over HTTP so you can use tools like `cURL` to +do database backups: + +```go +func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { + err := db.View(func(tx *bolt.Tx) error { + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) + w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) + _, err := tx.WriteTo(w) + return err + }) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} +``` + +Then you can backup using this command: + +```sh +$ curl http://localhost/backup > my.db +``` + +Or you can open your browser to `http://localhost/backup` and it will download +automatically. + +If you want to backup to another file you can use the `Tx.CopyFile()` helper +function. + + +### Statistics + +The database keeps a running count of many of the internal operations it +performs so you can better understand what's going on. By grabbing a snapshot +of these stats at two points in time we can see what operations were performed +in that time range. 
+ +For example, we could start a goroutine to log stats every 10 seconds: + +```go +go func() { + // Grab the initial stats. + prev := db.Stats() + + for { + // Wait for 10s. + time.Sleep(10 * time.Second) + + // Grab the current stats and diff them. + stats := db.Stats() + diff := stats.Sub(&prev) + + // Encode stats to JSON and print to STDERR. + json.NewEncoder(os.Stderr).Encode(diff) + + // Save stats for the next loop. + prev = stats + } +}() +``` + +It's also useful to pipe these stats to a service such as statsd for monitoring +or to provide an HTTP endpoint that will perform a fixed-length sample. + + +### Read-Only Mode + +Sometimes it is useful to create a shared, read-only Bolt database. To this, +set the `Options.ReadOnly` flag when opening your database. Read-only mode +uses a shared lock to allow multiple processes to read from the database but +it will block any processes from opening the database in read-write mode. + +```go +db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) +if err != nil { + log.Fatal(err) +} +``` + +### Mobile Use (iOS/Android) + +Bolt is able to run on mobile devices by leveraging the binding feature of the +[gomobile](https://github.com/golang/mobile) tool. Create a struct that will +contain your database logic and a reference to a `*bolt.DB` with a initializing +constructor that takes in a filepath where the database file will be stored. +Neither Android nor iOS require extra permissions or cleanup from using this method. + +```go +func NewBoltDB(filepath string) *BoltDB { + db, err := bolt.Open(filepath+"/demo.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + + return &BoltDB{db} +} + +type BoltDB struct { + db *bolt.DB + ... +} + +func (b *BoltDB) Path() string { + return b.db.Path() +} + +func (b *BoltDB) Close() { + b.db.Close() +} +``` + +Database logic should be defined as methods on this wrapper struct. + +To initialize this struct from the native language (both platforms now sync +their local storage to the cloud. These snippets disable that functionality for the +database file): + +#### Android + +```java +String path; +if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ + path = getNoBackupFilesDir().getAbsolutePath(); +} else{ + path = getFilesDir().getAbsolutePath(); +} +Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) +``` + +#### iOS + +```objc +- (void)demo { + NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, + NSUserDomainMask, + YES) objectAtIndex:0]; + GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); + [self addSkipBackupAttributeToItemAtPath:demo.path]; + //Some DB Logic would go here + [demo close]; +} + +- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString +{ + NSURL* URL= [NSURL fileURLWithPath: filePathString]; + assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); + + NSError *error = nil; + BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] + forKey: NSURLIsExcludedFromBackupKey error: &error]; + if(!success){ + NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); + } + return success; +} + +``` + +## Resources + +For more information on getting started with Bolt, check out the following articles: + +* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). 
+* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville + + +## Comparison with other databases + +### Postgres, MySQL, & other relational databases + +Relational databases structure data into rows and are only accessible through +the use of SQL. This approach provides flexibility in how you store and query +your data but also incurs overhead in parsing and planning SQL statements. Bolt +accesses all data by a byte slice key. This makes Bolt fast to read and write +data by key but provides no built-in support for joining values together. + +Most relational databases (with the exception of SQLite) are standalone servers +that run separately from your application. This gives your systems +flexibility to connect multiple application servers to a single database +server but also adds overhead in serializing and transporting data over the +network. Bolt runs as a library included in your application so all data access +has to go through your application's process. This brings data closer to your +application but limits multi-process access to the data. + + +### LevelDB, RocksDB + +LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that +they are libraries bundled into the application, however, their underlying +structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes +random writes by using a write ahead log and multi-tiered, sorted files called +SSTables. Bolt uses a B+tree internally and only a single file. Both approaches +have trade-offs. + +If you require a high random write throughput (>10,000 w/sec) or you need to use +spinning disks then LevelDB could be a good choice. If your application is +read-heavy or does a lot of range scans then Bolt could be a good choice. + +One other important consideration is that LevelDB does not have transactions. +It supports batch writing of key/values pairs and it supports read snapshots +but it will not give you the ability to do a compare-and-swap operation safely. +Bolt supports fully serializable ACID transactions. + + +### LMDB + +Bolt was originally a port of LMDB so it is architecturally similar. Both use +a B+tree, have ACID semantics with fully serializable transactions, and support +lock-free MVCC using a single writer and multiple readers. + +The two projects have somewhat diverged. LMDB heavily focuses on raw performance +while Bolt has focused on simplicity and ease of use. For example, LMDB allows +several unsafe actions such as direct writes for the sake of performance. Bolt +opts to disallow actions which can leave the database in a corrupted state. The +only exception to this in Bolt is `DB.NoSync`. + +There are also a few differences in API. LMDB requires a maximum mmap size when +opening an `mdb_env` whereas Bolt will handle incremental mmap resizing +automatically. LMDB overloads the getter and setter functions with multiple +flags whereas Bolt splits these specialized cases into their own functions. + + +## Caveats & Limitations + +It's important to pick the right tool for the job and Bolt is no exception. +Here are a few things to note when evaluating and using Bolt: + +* Bolt is good for read intensive workloads. Sequential write performance is + also fast but random writes can be slow. You can use `DB.Batch()` or add a + write-ahead log to help mitigate this issue. + +* Bolt uses a B+tree internally so there can be a lot of random page access. + SSDs provide a significant performance boost over spinning disks. 
+ +* Try to avoid long running read transactions. Bolt uses copy-on-write so + old pages cannot be reclaimed while an old transaction is using them. + +* Byte slices returned from Bolt are only valid during a transaction. Once the + transaction has been committed or rolled back then the memory they point to + can be reused by a new page or can be unmapped from virtual memory and you'll + see an `unexpected fault address` panic when accessing it. + +* Bolt uses an exclusive write lock on the database file so it cannot be + shared by multiple processes. + +* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for + buckets that have random inserts will cause your database to have very poor + page utilization. + +* Use larger buckets in general. Smaller buckets causes poor page utilization + once they become larger than the page size (typically 4KB). + +* Bulk loading a lot of random writes into a new bucket can be slow as the + page will not split until the transaction is committed. Randomly inserting + more than 100,000 key/value pairs into a single new bucket in a single + transaction is not advised. + +* Bolt uses a memory-mapped file so the underlying operating system handles the + caching of the data. Typically, the OS will cache as much of the file as it + can in memory and will release memory as needed to other processes. This means + that Bolt can show very high memory usage when working with large databases. + However, this is expected and the OS will release memory as needed. Bolt can + handle databases much larger than the available physical RAM, provided its + memory-map fits in the process virtual address space. It may be problematic + on 32-bits systems. + +* The data structures in the Bolt database are memory mapped so the data file + will be endian specific. This means that you cannot copy a Bolt file from a + little endian machine to a big endian machine and have it work. For most + users this is not a concern since most modern CPUs are little endian. + +* Because of the way pages are laid out on disk, Bolt cannot truncate data files + and return free pages back to the disk. Instead, Bolt maintains a free list + of unused pages within its data file. These free pages can be reused by later + transactions. This works well for many use cases as databases generally tend + to grow. However, it's important to note that deleting large chunks of data + will not allow you to reclaim that space on disk. + + For more information on page allocation, [see this comment][page-allocation]. + +[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 + + +## Reading the Source + +Bolt is a relatively small code base (<5KLOC) for an embedded, serializable, +transactional key/value database so it can be a good starting point for people +interested in how databases work. + +The best places to start are the main entry points into Bolt: + +- `Open()` - Initializes the reference to the database. It's responsible for + creating the database if it doesn't exist, obtaining an exclusive lock on the + file, reading the meta pages, & memory-mapping the file. + +- `DB.Begin()` - Starts a read-only or read-write transaction depending on the + value of the `writable` argument. This requires briefly obtaining the "meta" + lock to keep track of open transactions. Only one read-write transaction can + exist at a time so the "rwlock" is acquired during the life of a read-write + transaction. + +- `Bucket.Put()` - Writes a key/value pair into a bucket. 
After validating the + arguments, a cursor is used to traverse the B+tree to the page and position + where they key & value will be written. Once the position is found, the bucket + materializes the underlying page and the page's parent pages into memory as + "nodes". These nodes are where mutations occur during read-write transactions. + These changes get flushed to disk during commit. + +- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor + to move to the page & position of a key/value pair. During a read-only + transaction, the key and value data is returned as a direct reference to the + underlying mmap file so there's no allocation overhead. For read-write + transactions, this data may reference the mmap file or one of the in-memory + node values. + +- `Cursor` - This object is simply for traversing the B+tree of on-disk pages + or in-memory nodes. It can seek to a specific key, move to the first or last + value, or it can move forward or backward. The cursor handles the movement up + and down the B+tree transparently to the end user. + +- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages + into pages to be written to disk. Writing to disk then occurs in two phases. + First, the dirty pages are written to disk and an `fsync()` occurs. Second, a + new meta page with an incremented transaction ID is written and another + `fsync()` occurs. This two phase write ensures that partially written data + pages are ignored in the event of a crash since the meta page pointing to them + is never written. Partially written meta pages are invalidated because they + are written with a checksum. + +If you have additional notes that could be helpful for others, please submit +them via pull request. + + +## Other Projects Using Bolt + +Below is a list of public, open source projects that use Bolt: + +* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. +* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. +* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. +* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support. +* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB +* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. +* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. +* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. +* [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB Viewer Can run on Windows、Linux、Android system. +* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. +* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. +* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. +* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. 
+* [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go. +* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. +* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. +* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. +* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. +* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. +* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. +* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. +* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. +* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains +* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more) +* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". +* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. +* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. +* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. +* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies +* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. +* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding. +* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. +* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. +* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. +* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. +* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. +* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage. +* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. 
+* [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library. +* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi. +* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service +* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. +* [stow](https://github.com/djherbis/stow) - a persistence manager for objects + backed by boltdb. +* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB. +* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings. +* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. +* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. +* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. +* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. +* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. + +If you are using Bolt in a project please send a pull request to add it to the list. diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_386.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_386.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_386.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_386.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,7 @@ +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_amd64.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_amd64.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_amd64.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_amd64.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,7 @@ +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_arm.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_arm.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_arm.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_arm.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,7 @@ +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. 
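The `Bucket.Put()`, `Bucket.Get()`, `Cursor` and `Tx.Commit()` notes earlier describe the write and read paths at the page level. The following is a minimal, illustrative sketch of how those calls are exercised through the public `go.etcd.io/bbolt` API; the file name, bucket name and keys ("example.db", "widgets", "answer") are invented for the example and are not part of the vendored code above.

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Opening the file mmaps it and reads the meta pages.
	db, err := bolt.Open("example.db", 0600, nil) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Update() runs a read-write transaction. Put() walks the B+tree with a
	// cursor and mutates in-memory nodes; Tx.Commit() (called implicitly when
	// the closure returns nil) flushes them with the two-phase write.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("answer"), []byte("42"))
	}); err != nil {
		log.Fatal(err)
	}

	// View() runs a read-only transaction. Get() returns a slice pointing
	// straight into the mmap, so copy it if it must outlive the transaction.
	if err := db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket([]byte("widgets")).Get([]byte("answer"))
		fmt.Printf("answer=%s\n", v)

		// A cursor traverses keys in lexicographical order.
		c := tx.Bucket([]byte("widgets")).Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s=%s\n", k, v)
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}
```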
+const maxAllocSize = 0xFFFFFFF diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_arm64.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_arm64.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_arm64.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_arm64.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,10 @@ +//go:build arm64 +// +build arm64 + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_linux.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_linux.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_linux.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,10 @@ +package bbolt + +import ( + "syscall" +) + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return syscall.Fdatasync(int(db.file.Fd())) +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_loong64.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_loong64.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_loong64.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_loong64.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,10 @@ +//go:build loong64 +// +build loong64 + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_mips64x.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_mips64x.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_mips64x.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_mips64x.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,10 @@ +//go:build mips64 || mips64le +// +build mips64 mips64le + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x8000000000 // 512GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_mipsx.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_mipsx.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_mipsx.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_mipsx.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,10 @@ +//go:build mips || mipsle +// +build mips mipsle + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x40000000 // 1GB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0xFFFFFFF diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_openbsd.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_openbsd.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_openbsd.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_openbsd.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,16 @@ +package bbolt + +import ( + "golang.org/x/sys/unix" +) + +func msync(db *DB) error { + return unix.Msync(db.data[:db.datasz], unix.MS_INVALIDATE) +} + +func fdatasync(db *DB) error { + if db.data != nil { + return msync(db) + } + return db.file.Sync() +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_ppc.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_ppc.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_ppc.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_ppc.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,10 @@ +//go:build ppc +// +build ppc + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_ppc64.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_ppc64.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_ppc64.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_ppc64.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,10 @@ +//go:build ppc64 +// +build ppc64 + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_ppc64le.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_ppc64le.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_ppc64le.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_ppc64le.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,10 @@ +//go:build ppc64le +// +build ppc64le + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_riscv64.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_riscv64.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_riscv64.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_riscv64.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,10 @@ +//go:build riscv64 +// +build riscv64 + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_s390x.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_s390x.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_s390x.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_s390x.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,10 @@ +//go:build s390x +// +build s390x + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0x7FFFFFFF diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_unix.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_unix.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_unix.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_unix.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,87 @@ +//go:build !windows && !plan9 && !solaris && !aix +// +build !windows,!plan9,!solaris,!aix + +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + flag := syscall.LOCK_NB + if exclusive { + flag |= syscall.LOCK_EX + } else { + flag |= syscall.LOCK_SH + } + for { + // Attempt to obtain an exclusive lock. + err := syscall.Flock(int(fd), flag) + if err == nil { + return nil + } else if err != syscall.EWOULDBLOCK { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + err = unix.Madvise(b, syscall.MADV_RANDOM) + if err != nil && err != syscall.ENOSYS { + // Ignore not implemented error in kernel because it still works. + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_unix_aix.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_unix_aix.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_unix_aix.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_unix_aix.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,91 @@ +//go:build aix +// +build aix + +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. 
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,88 @@ +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. 
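The platform-specific `flock` implementations above retry every `flockRetryTimeout` (50ms) and return `ErrTimeout` only once a non-zero deadline has passed; with a zero timeout they keep retrying indefinitely. Below is a small, hypothetical sketch of how that behaviour surfaces to a caller through the exported API; the file name is a placeholder.

```go
package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// A non-zero Options.Timeout bounds how long flock() keeps retrying when
	// another process holds the lock; the zero value would wait forever.
	db, err := bolt.Open("shared.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
	if err == bolt.ErrTimeout {
		log.Fatal("database file is locked by another process")
	} else if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```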
+func munmap(db *DB) error {
+	// Ignore the unmap if we have no mapped data.
+	if db.dataref == nil {
+		return nil
+	}
+
+	// Unmap using the original byte slice.
+	err := unix.Munmap(db.dataref)
+	db.dataref = nil
+	db.data = nil
+	db.datasz = 0
+	return err
+}
diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_windows.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_windows.go
--- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bolt_windows.go 1970-01-01 00:00:00.000000000 +0000
+++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bolt_windows.go 2024-04-24 00:00:39.000000000 +0000
@@ -0,0 +1,117 @@
+package bbolt
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+	"time"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+// fdatasync flushes written data to a file descriptor.
+func fdatasync(db *DB) error {
+	return db.file.Sync()
+}
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
+	var t time.Time
+	if timeout != 0 {
+		t = time.Now()
+	}
+	var flags uint32 = windows.LOCKFILE_FAIL_IMMEDIATELY
+	if exclusive {
+		flags |= windows.LOCKFILE_EXCLUSIVE_LOCK
+	}
+	for {
+		// Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range
+		// -1..0 as the lock on the database file.
+		var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
+		err := windows.LockFileEx(windows.Handle(db.file.Fd()), flags, 0, 1, 0, &windows.Overlapped{
+			Offset:     m1,
+			OffsetHigh: m1,
+		})
+
+		if err == nil {
+			return nil
+		} else if err != windows.ERROR_LOCK_VIOLATION {
+			return err
+		}
+
+		// If we timed out then return an error.
+		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+			return ErrTimeout
+		}
+
+		// Wait for a bit and try again.
+		time.Sleep(flockRetryTimeout)
+	}
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+	var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
+	return windows.UnlockFileEx(windows.Handle(db.file.Fd()), 0, 1, 0, &windows.Overlapped{
+		Offset:     m1,
+		OffsetHigh: m1,
+	})
+}
+
+// mmap memory maps a DB's data file.
+// Based on: https://github.com/edsrzf/mmap-go
+func mmap(db *DB, sz int) error {
+	var sizelo, sizehi uint32
+
+	if !db.readOnly {
+		// Truncate the database to the size of the mmap.
+		if err := db.file.Truncate(int64(sz)); err != nil {
+			return fmt.Errorf("truncate: %s", err)
+		}
+		sizehi = uint32(sz >> 32)
+		sizelo = uint32(sz) & 0xffffffff
+	}
+
+	// Open a file mapping handle.
+	h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
+	if h == 0 {
+		return os.NewSyscallError("CreateFileMapping", errno)
+	}
+
+	// Create the memory map.
+	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, 0)
+	if addr == 0 {
+		// Do our best and report error returned from MapViewOfFile.
+		_ = syscall.CloseHandle(h)
+		return os.NewSyscallError("MapViewOfFile", errno)
+	}
+
+	// Close mapping handle.
+	if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
+		return os.NewSyscallError("CloseHandle", err)
+	}
+
+	// Convert to a byte array.
+	db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
+	db.datasz = sz
+
+	return nil
+}
+
+// munmap unmaps a pointer from a file.
+// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + var err1 error + if err := syscall.UnmapViewOfFile(addr); err != nil { + err1 = os.NewSyscallError("UnmapViewOfFile", err) + } + db.data = nil + db.datasz = 0 + return err1 +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/boltsync_unix.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/boltsync_unix.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/boltsync_unix.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/boltsync_unix.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,9 @@ +//go:build !windows && !plan9 && !linux && !openbsd +// +build !windows,!plan9,!linux,!openbsd + +package bbolt + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bucket.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bucket.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/bucket.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/bucket.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,799 @@ +package bbolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. + MaxValueSize = (1 << 31) - 2 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. +type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. +func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. +func (b *Bucket) Root() pgid { + return b.root +} + +// Writable returns whether the bucket is writable. +func (b *Bucket) Writable() bool { + return b.tx.writable +} + +// Cursor creates a cursor associated with the bucket. +// The cursor is only valid as long as the transaction is open. 
+// Do not use a cursor after the transaction is closed. +func (b *Bucket) Cursor() *Cursor { + // Update transaction statistics. + b.tx.stats.IncCursorCount(1) + + // Allocate and return a cursor. + return &Cursor{ + bucket: b, + stack: make([]elemRef, 0), + } +} + +// Bucket retrieves a nested bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) Bucket(name []byte) *Bucket { + if b.buckets != nil { + if child := b.buckets[string(name)]; child != nil { + return child + } + } + + // Move cursor to key. + c := b.Cursor() + k, v, flags := c.seek(name) + + // Return nil if the key doesn't exist or it is not a bucket. + if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + return nil + } + + // Otherwise create a bucket and cache it. + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(name)] = child + } + + return child +} + +// Helper method that re-interprets a sub-bucket value +// from a parent into a Bucket +func (b *Bucket) openBucket(value []byte) *Bucket { + var child = newBucket(b.tx) + + // Unaligned access requires a copy to be made. + const unalignedMask = unsafe.Alignof(struct { + bucket + page + }{}) - 1 + unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0 + if unaligned { + value = cloneBytes(value) + } + + // If this is a writable transaction then we need to copy the bucket entry. + // Read-only transactions can point directly at the mmap entry. + if b.tx.writable && !unaligned { + child.bucket = &bucket{} + *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + } else { + child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + } + + // Save a reference to the inline page if the bucket is inline. + if child.root == 0 { + child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + } + + return &child +} + +// CreateBucket creates a new bucket at the given key and returns the new bucket. +// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { + if b.tx.db == nil { + return nil, ErrTxClosed + } else if !b.tx.writable { + return nil, ErrTxNotWritable + } else if len(key) == 0 { + return nil, ErrBucketNameRequired + } + + // Insert into node. + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, and accordingly cannot be allocated on stack. + newKey := cloneBytes(key) + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(newKey) + + // Return an error if there is an existing key. + if bytes.Equal(newKey, k) { + if (flags & bucketLeafFlag) != 0 { + return nil, ErrBucketExists + } + return nil, ErrIncompatibleValue + } + + // Create empty, inline bucket. + var bucket = Bucket{ + bucket: &bucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, + } + var value = bucket.write() + + c.node().put(newKey, newKey, value, 0, bucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(newKey), nil +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. 
+// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { + child, err := b.CreateBucket(key) + if err == ErrBucketExists { + return b.Bucket(key), nil + } else if err != nil { + return nil, err + } + return child, nil +} + +// DeleteBucket deletes a bucket at the given key. +// Returns an error if the bucket does not exist, or if the key represents a non-bucket value. +func (b *Bucket) DeleteBucket(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(key, k) { + return ErrBucketNotFound + } else if (flags & bucketLeafFlag) == 0 { + return ErrIncompatibleValue + } + + // Recursively delete all child buckets. + child := b.Bucket(key) + err := child.ForEachBucket(func(k []byte) error { + if err := child.DeleteBucket(k); err != nil { + return fmt.Errorf("delete bucket: %s", err) + } + return nil + }) + if err != nil { + return err + } + + // Remove cached copy. + delete(b.buckets, string(key)) + + // Release all bucket pages to freelist. + child.nodes = nil + child.rootNode = nil + child.free() + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Get retrieves the value for a key in the bucket. +// Returns a nil value if the key does not exist or if the key is a nested bucket. +// The returned value is only valid for the life of the transaction. +func (b *Bucket) Get(key []byte) []byte { + k, v, flags := b.Cursor().seek(key) + + // Return nil if this is a bucket. + if (flags & bucketLeafFlag) != 0 { + return nil + } + + // If our target node isn't the same key as what's passed in then return nil. + if !bytes.Equal(key, k) { + return nil + } + return v +} + +// Put sets the value for a key in the bucket. +// If the key exist then its previous value will be overwritten. +// Supplied value must remain valid for the life of the transaction. +// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. +func (b *Bucket) Put(key []byte, value []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } else if len(key) == 0 { + return ErrKeyRequired + } else if len(key) > MaxKeySize { + return ErrKeyTooLarge + } else if int64(len(value)) > MaxValueSize { + return ErrValueTooLarge + } + + // Insert into node. + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, and accordingly cannot be allocated on stack. + newKey := cloneBytes(key) + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(newKey) + + // Return an error if there is an existing key with a bucket value. + if bytes.Equal(newKey, k) && (flags&bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // gofail: var beforeBucketPut struct{} + + c.node().put(newKey, newKey, value, 0, 0) + + return nil +} + +// Delete removes a key from the bucket. +// If the key does not exist then nothing is done and a nil error is returned. +// Returns an error if the bucket was created from a read-only transaction. 
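The `CreateBucket*`, `Get`, `Put` and `Delete` doc comments above spell out the key invariants: `Get` returns nil for missing keys and for nested buckets, returned slices are only valid for the life of the transaction, and `Put` refuses to overwrite a key that already holds a bucket. A brief, illustrative fragment follows; it assumes `db` is an already-open `*bolt.DB` (with `bolt "go.etcd.io/bbolt"` and `fmt` imported), and the bucket and key names are invented.

```go
err := db.Update(func(tx *bolt.Tx) error {
	root, err := tx.CreateBucketIfNotExists([]byte("app"))
	if err != nil {
		return err
	}
	if _, err := root.CreateBucketIfNotExists([]byte("users")); err != nil {
		return err
	}

	// Get returns nil both for a missing key and for a key that is a
	// nested bucket, so "users" cannot be read as a plain value here.
	if v := root.Get([]byte("users")); v != nil {
		return fmt.Errorf("unexpected value for bucket key: %q", v)
	}

	// Put rejects a key that already holds a bucket.
	if err := root.Put([]byte("users"), []byte("x")); err != bolt.ErrIncompatibleValue {
		return fmt.Errorf("expected ErrIncompatibleValue, got %v", err)
	}

	// Values point into the mmap (or dirty nodes); copy them if they need
	// to outlive the transaction.
	if err := root.Put([]byte("name"), []byte("snapd")); err != nil {
		return err
	}
	name := append([]byte(nil), root.Get([]byte("name"))...)
	_ = name
	return nil
})
```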
+func (b *Bucket) Delete(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return nil if the key doesn't exist. + if !bytes.Equal(key, k) { + return nil + } + + // Return an error if there is already existing bucket value. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Sequence returns the current integer for the bucket without incrementing it. +func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } + +// SetSequence updates the sequence number for the bucket. +func (b *Bucket) SetSequence(v uint64) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Set the sequence. + b.bucket.sequence = v + return nil +} + +// NextSequence returns an autoincrementing integer for the bucket. +func (b *Bucket) NextSequence() (uint64, error) { + if b.tx.db == nil { + return 0, ErrTxClosed + } else if !b.Writable() { + return 0, ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence++ + return b.bucket.sequence, nil +} + +// ForEach executes a function for each key/value pair in a bucket. +// Because ForEach uses a Cursor, the iteration over keys is in lexicographical order. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. The provided function must not modify +// the bucket; this will result in undefined behavior. +func (b *Bucket) ForEach(fn func(k, v []byte) error) error { + if b.tx.db == nil { + return ErrTxClosed + } + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := fn(k, v); err != nil { + return err + } + } + return nil +} + +func (b *Bucket) ForEachBucket(fn func(k []byte) error) error { + if b.tx.db == nil { + return ErrTxClosed + } + c := b.Cursor() + for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { + if flags&bucketLeafFlag != 0 { + if err := fn(k); err != nil { + return err + } + } + } + return nil +} + +// Stats returns stats on a bucket. +func (b *Bucket) Stats() BucketStats { + var s, subStats BucketStats + pageSize := b.tx.db.pageSize + s.BucketN += 1 + if b.root == 0 { + s.InlineBucketN += 1 + } + b.forEachPage(func(p *page, depth int, pgstack []pgid) { + if (p.flags & leafPageFlag) != 0 { + s.KeyN += int(p.count) + + // used totals the used bytes for the page + used := pageHeaderSize + + if p.count != 0 { + // If page has any elements, add all element headers. + used += leafPageElementSize * uintptr(p.count-1) + + // Add all element key, value sizes. + // The computation takes advantage of the fact that the position + // of the last element's key/value equals to the total of the sizes + // of all previous elements' keys and values. + // It also includes the last element's header. 
+ lastElement := p.leafPageElement(p.count - 1) + used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize) + } + + if b.root == 0 { + // For inlined bucket just update the inline stats + s.InlineBucketInuse += int(used) + } else { + // For non-inlined bucket update all the leaf stats + s.LeafPageN++ + s.LeafInuse += int(used) + s.LeafOverflowN += int(p.overflow) + + // Collect stats from sub-buckets. + // Do that by iterating over all element headers + // looking for the ones with the bucketLeafFlag. + for i := uint16(0); i < p.count; i++ { + e := p.leafPageElement(i) + if (e.flags & bucketLeafFlag) != 0 { + // For any bucket element, open the element value + // and recursively call Stats on the contained bucket. + subStats.Add(b.openBucket(e.value()).Stats()) + } + } + } + } else if (p.flags & branchPageFlag) != 0 { + s.BranchPageN++ + lastElement := p.branchPageElement(p.count - 1) + + // used totals the used bytes for the page + // Add header and all element headers. + used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1)) + + // Add size of all keys and values. + // Again, use the fact that last element's position equals to + // the total of key, value sizes of all previous elements. + used += uintptr(lastElement.pos + lastElement.ksize) + s.BranchInuse += int(used) + s.BranchOverflowN += int(p.overflow) + } + + // Keep track of maximum page depth. + if depth+1 > s.Depth { + s.Depth = depth + 1 + } + }) + + // Alloc stats can be computed from page counts and pageSize. + s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize + s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize + + // Add the max depth of sub-buckets to get total nested depth. + s.Depth += subStats.Depth + // Add the stats for all sub-buckets + s.Add(subStats) + return s +} + +// forEachPage iterates over every page in a bucket, including inline pages. +func (b *Bucket) forEachPage(fn func(*page, int, []pgid)) { + // If we have an inline page then just use that. + if b.page != nil { + fn(b.page, 0, []pgid{b.root}) + return + } + + // Otherwise traverse the page hierarchy. + b.tx.forEachPage(b.root, fn) +} + +// forEachPageNode iterates over every page (or node) in a bucket. +// This also includes inline pages. +func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { + // If we have an inline page or root node then just use that. + if b.page != nil { + fn(b.page, nil, 0) + return + } + b._forEachPageNode(b.root, 0, fn) +} + +func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgId) + + // Execute function. + fn(p, n, depth) + + // Recursively loop over children. + if p != nil { + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + b._forEachPageNode(elem.pgid, depth+1, fn) + } + } + } else { + if !n.isLeaf { + for _, inode := range n.inodes { + b._forEachPageNode(inode.pgid, depth+1, fn) + } + } + } +} + +// spill writes all the nodes for this bucket to dirty pages. +func (b *Bucket) spill() error { + // Spill all child buckets first. + for name, child := range b.buckets { + // If the child bucket is small enough and it has no child buckets then + // write it inline into the parent bucket's page. Otherwise spill it + // like a normal bucket and make the parent value a pointer to the page. 
+ var value []byte + if child.inlineable() { + child.free() + value = child.write() + } else { + if err := child.spill(); err != nil { + return err + } + + // Update the child bucket header in this bucket. + value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. + var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. + if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. + var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() uintptr { + return uintptr(b.tx.db.pageSize / 4) +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. + var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. +func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgId pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgId]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgId) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgId] = n + + // Update statistics. 
+ b.tx.stats.IncNodeCount(1) + + return n +} + +// free recursively frees all pages in the bucket. +func (b *Bucket) free() { + if b.root == 0 { + return + } + + var tx = b.tx + b.forEachPageNode(func(p *page, n *node, _ int) { + if p != nil { + tx.db.freelist.free(tx.meta.txid, p) + } else { + n.free() + } + }) + b.root = 0 +} + +// dereference removes all references to the old mmap. +func (b *Bucket) dereference() { + if b.rootNode != nil { + b.rootNode.root().dereference() + } + + for _, child := range b.buckets { + child.dereference() + } +} + +// pageNode returns the in-memory node, if it exists. +// Otherwise returns the underlying page. +func (b *Bucket) pageNode(id pgid) (*page, *node) { + // Inline buckets have a fake page embedded in their value so treat them + // differently. We'll return the rootNode (if available) or the fake page. + if b.root == 0 { + if id != 0 { + panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) + } + if b.rootNode != nil { + return nil, b.rootNode + } + return b.page, nil + } + + // Check the node cache for non-inline buckets. + if b.nodes != nil { + if n := b.nodes[id]; n != nil { + return nil, n + } + } + + // Finally lookup the page from the transaction if no node is materialized. + return b.tx.page(id), nil +} + +// BucketStats records statistics about resources used by a bucket. +type BucketStats struct { + // Page count statistics. + BranchPageN int // number of logical branch pages + BranchOverflowN int // number of physical branch overflow pages + LeafPageN int // number of logical leaf pages + LeafOverflowN int // number of physical leaf overflow pages + + // Tree statistics. + KeyN int // number of keys/value pairs + Depth int // number of levels in B+tree + + // Page size utilization. + BranchAlloc int // bytes allocated for physical branch pages + BranchInuse int // bytes actually used for branch data + LeafAlloc int // bytes allocated for physical leaf pages + LeafInuse int // bytes actually used for leaf data + + // Bucket statistics + BucketN int // total number of buckets including the top bucket + InlineBucketN int // total number on inlined buckets + InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) +} + +func (s *BucketStats) Add(other BucketStats) { + s.BranchPageN += other.BranchPageN + s.BranchOverflowN += other.BranchOverflowN + s.LeafPageN += other.LeafPageN + s.LeafOverflowN += other.LeafOverflowN + s.KeyN += other.KeyN + if s.Depth < other.Depth { + s.Depth = other.Depth + } + s.BranchAlloc += other.BranchAlloc + s.BranchInuse += other.BranchInuse + s.LeafAlloc += other.LeafAlloc + s.LeafInuse += other.LeafInuse + + s.BucketN += other.BucketN + s.InlineBucketN += other.InlineBucketN + s.InlineBucketInuse += other.InlineBucketInuse +} + +// cloneBytes returns a copy of a given slice. +func cloneBytes(v []byte) []byte { + var clone = make([]byte, len(v)) + copy(clone, v) + return clone +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/compact.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/compact.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/compact.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/compact.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,119 @@ +package bbolt + +// Compact will create a copy of the source DB and in the destination DB. This may +// reclaim space that the source database no longer has use for. 
txMaxSize can be +// used to limit the transactions size of this process and may trigger intermittent +// commits. A value of zero will ignore transaction sizes. +// TODO: merge with: https://github.com/etcd-io/etcd/blob/b7f0f52a16dbf83f18ca1d803f7892d750366a94/mvcc/backend/backend.go#L349 +func Compact(dst, src *DB, txMaxSize int64) error { + // commit regularly, or we'll run out of memory for large datasets if using one transaction. + var size int64 + tx, err := dst.Begin(true) + if err != nil { + return err + } + defer func() { + if tempErr := tx.Rollback(); tempErr != nil { + err = tempErr + } + }() + + if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error { + // On each key/value, check if we have exceeded tx size. + sz := int64(len(k) + len(v)) + if size+sz > txMaxSize && txMaxSize != 0 { + // Commit previous transaction. + if err := tx.Commit(); err != nil { + return err + } + + // Start new transaction. + tx, err = dst.Begin(true) + if err != nil { + return err + } + size = 0 + } + size += sz + + // Create bucket on the root transaction if this is the first level. + nk := len(keys) + if nk == 0 { + bkt, err := tx.CreateBucket(k) + if err != nil { + return err + } + if err := bkt.SetSequence(seq); err != nil { + return err + } + return nil + } + + // Create buckets on subsequent levels, if necessary. + b := tx.Bucket(keys[0]) + if nk > 1 { + for _, k := range keys[1:] { + b = b.Bucket(k) + } + } + + // Fill the entire page for best compaction. + b.FillPercent = 1.0 + + // If there is no value then this is a bucket call. + if v == nil { + bkt, err := b.CreateBucket(k) + if err != nil { + return err + } + if err := bkt.SetSequence(seq); err != nil { + return err + } + return nil + } + + // Otherwise treat it as a key/value pair. + return b.Put(k, v) + }); err != nil { + return err + } + err = tx.Commit() + + return err +} + +// walkFunc is the type of the function called for keys (buckets and "normal" +// values) discovered by Walk. keys is the list of keys to descend to the bucket +// owning the discovered key/value pair k/v. +type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error + +// walk walks recursively the bolt database db, calling walkFn for each key it finds. +func walk(db *DB, walkFn walkFunc) error { + return db.View(func(tx *Tx) error { + return tx.ForEach(func(name []byte, b *Bucket) error { + return walkBucket(b, nil, name, nil, b.Sequence(), walkFn) + }) + }) +} + +func walkBucket(b *Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error { + // Execute callback. + if err := fn(keypath, k, v, seq); err != nil { + return err + } + + // If this is not a bucket then stop. + if v != nil { + return nil + } + + // Iterate over each child key/value. + keypath = append(keypath, k) + return b.ForEach(func(k, v []byte) error { + if v == nil { + bkt := b.Bucket(k) + return walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn) + } + return walkBucket(b, keypath, k, v, b.Sequence(), fn) + }) +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/cursor.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/cursor.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/cursor.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/cursor.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,420 @@ +package bbolt + +import ( + "bytes" + "fmt" + "sort" +) + +// Cursor represents an iterator that can traverse over all key/value pairs in a bucket +// in lexicographical order. +// Cursors see nested buckets with value == nil. 
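`Compact()` above copies every bucket, sequence number and key/value pair from `src` into `dst`, committing an intermediate transaction whenever roughly `txMaxSize` bytes have been copied (zero means a single transaction). A small, hypothetical sketch of driving it; the file names are placeholders, and `src` is opened read-only so the copy cannot mutate it. Assumes `bolt "go.etcd.io/bbolt"` and `log` are imported.

```go
src, err := bolt.Open("data.db", 0600, &bolt.Options{ReadOnly: true})
if err != nil {
	log.Fatal(err)
}
defer src.Close()

dst, err := bolt.Open("data-compacted.db", 0600, nil)
if err != nil {
	log.Fatal(err)
}
defer dst.Close()

// Commit roughly every 64KB of copied data; 0 would use one large transaction.
if err := bolt.Compact(dst, src, 64*1024); err != nil {
	log.Fatal(err)
}
```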
+// Cursors can be obtained from a transaction and are valid as long as the transaction is open. +// +// Keys and values returned from the cursor are only valid for the life of the transaction. +// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. +type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.first() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +func (c *Cursor) first() (key []byte, value []byte, flags uint32) { + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.goToFirstElementOnTheStack() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil, flags + } + return k, v, flags +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + + // If this is an empty page (calling Delete may result in empty pages) + // we call prev to find the last page that is not empty + for len(c.stack) > 0 && c.stack[len(c.stack)-1].count() == 0 { + c.prev() + } + + if len(c.stack) == 0 { + return nil, nil + } + + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. +// If the cursor is at the end of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.prev() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key using a b-tree search and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. 
+// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. + if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. +func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + + // If this is a bucket then return a nil value. + return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) goToFirstElementOnTheStack() { + for { + // Exit when we hit a leaf page. + var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. + var pgId pgid + if ref.node != nil { + pgId = ref.node.inodes[ref.index].pgid + } else { + pgId = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgId) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. + ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgId pgid + if ref.node != nil { + pgId = ref.node.inodes[ref.index].pgid + } else { + pgId = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgId) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. +// If the cursor is at the last leaf element then it stays there and returns nil. +func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.goToFirstElementOnTheStack() + + // If this is an empty page then restart and move back up the stack. 
+ // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } +} + +// prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +func (c *Cursor) prev() (key []byte, value []byte, flags uint32) { + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil, 0 + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + return c.keyValue() +} + +// search recursively performs a binary search against a given page/node until it finds a given key. +func (c *Cursor) search(key []byte, pgId pgid) { + p, n := c.bucket.pageNode(pgId) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. + if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. + inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. +func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. + inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. 
+ if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. + elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. +func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. + if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { + return ref.node + } + + // Start from root and traverse down the hierarchy. + var n = c.stack[0].node + if n == nil { + n = c.bucket.node(c.stack[0].page.id, nil) + } + for _, ref := range c.stack[:len(c.stack)-1] { + _assert(!n.isLeaf, "expected branch node") + n = n.childAt(ref.index) + } + _assert(n.isLeaf, "expected leaf node") + return n +} + +// elemRef represents a reference to an element on a given page/node. +type elemRef struct { + page *page + node *node + index int +} + +// isLeaf returns whether the ref is pointing at a leaf page/node. +func (r *elemRef) isLeaf() bool { + if r.node != nil { + return r.node.isLeaf + } + return (r.page.flags & leafPageFlag) != 0 +} + +// count returns the number of inodes or page elements. +func (r *elemRef) count() int { + if r.node != nil { + return len(r.node.inodes) + } + return int(r.page.count) +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/db.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/db.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/db.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/db.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,1389 @@ +package bbolt + +import ( + "errors" + "fmt" + "hash/fnv" + "io" + "os" + "runtime" + "sort" + "sync" + "time" + "unsafe" +) + +// The largest step that can be taken when remapping the mmap. +const maxMmapStep = 1 << 30 // 1GB + +// The data file format version. +const version = 2 + +// Represents a marker value to indicate that a file is a Bolt DB. +const magic uint32 = 0xED0CDAED + +const pgidNoFreelist pgid = 0xffffffffffffffff + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// default page size for db is set to the OS page size. +var defaultPageSize = os.Getpagesize() + +// The time elapsed between consecutive file locking attempts. +const flockRetryTimeout = 50 * time.Millisecond + +// FreelistType is the type of the freelist backend +type FreelistType string + +const ( + // FreelistArrayType indicates backend freelist type is array + FreelistArrayType = FreelistType("array") + // FreelistMapType indicates backend freelist type is hashmap + FreelistMapType = FreelistType("hashmap") +) + +// DB represents a collection of buckets persisted to a file on disk. +// All data access is performed through transactions which can be obtained through the DB. 
+// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. +type DB struct { + // Put `stats` at the first field to ensure it's 64-bit aligned. Note that + // the first word in an allocated struct can be relied upon to be 64-bit + // aligned. Refer to https://pkg.go.dev/sync/atomic#pkg-note-BUG. Also + // refer to discussion in https://github.com/etcd-io/bbolt/issues/577. + stats Stats + + // When enabled, the database will perform a Check() after every commit. + // A panic is issued if the database is in an inconsistent state. This + // flag has a large performance impact so it should only be used for + // debugging purposes. + StrictMode bool + + // Setting the NoSync flag will cause the database to skip fsync() + // calls after each commit. This can be useful when bulk loading data + // into a database and you can restart the bulk load in the event of + // a system failure or database corruption. Do not set this flag for + // normal use. + // + // If the package global IgnoreNoSync constant is true, this value is + // ignored. See the comment on that constant for more details. + // + // THIS IS UNSAFE. PLEASE USE WITH CAUTION. + NoSync bool + + // When true, skips syncing freelist to disk. This improves the database + // write performance under normal operation, but requires a full database + // re-sync during recovery. + NoFreelistSync bool + + // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures + // dramatic performance degradation if database is large and fragmentation in freelist is common. + // The alternative one is using hashmap, it is faster in almost all circumstances + // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. + // The default type is array + FreelistType FreelistType + + // When true, skips the truncate call when growing the database. + // Setting this to true is only safe on non-ext3/ext4 systems. + // Skipping truncation avoids preallocation of hard drive space and + // bypasses a truncate() and fsync() syscall on remapping. + // + // https://github.com/boltdb/bolt/issues/284 + NoGrowSync bool + + // When `true`, bbolt will always load the free pages when opening the DB. + // When opening db in write mode, this flag will always automatically + // set to `true`. + PreLoadFreelist bool + + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + + // MaxBatchSize is the maximum size of a batch. Default value is + // copied from DefaultMaxBatchSize in Open. + // + // If <=0, disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchSize int + + // MaxBatchDelay is the maximum delay before a batch starts. + // Default value is copied from DefaultMaxBatchDelay in Open. + // + // If <=0, effectively disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchDelay time.Duration + + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + + // Mlock locks database file in memory when set to true. + // It prevents major page faults, however used memory can't be reclaimed. + // + // Supported only on Unix via mlock/munlock syscalls. 
+ Mlock bool + + path string + openFile func(string, int, os.FileMode) (*os.File, error) + file *os.File + // `dataref` isn't used at all on Windows, and the golangci-lint + // always fails on Windows platform. + //nolint + dataref []byte // mmap'ed readonly, write throws SEGV + data *[maxMapSize]byte + datasz int + filesz int // current on disk file size + meta0 *meta + meta1 *meta + pageSize int + opened bool + rwtx *Tx + txs []*Tx + + freelist *freelist + freelistLoad sync.Once + + pagePool sync.Pool + + batchMu sync.Mutex + batch *batch + + rwlock sync.Mutex // Allows only one writer at a time. + metalock sync.Mutex // Protects meta page access. + mmaplock sync.RWMutex // Protects mmap access during remapping. + statlock sync.RWMutex // Protects stats access. + + ops struct { + writeAt func(b []byte, off int64) (n int, err error) + } + + // Read only mode. + // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. + readOnly bool +} + +// Path returns the path to currently open database file. +func (db *DB) Path() string { + return db.path +} + +// GoString returns the Go string representation of the database. +func (db *DB) GoString() string { + return fmt.Sprintf("bolt.DB{path:%q}", db.path) +} + +// String returns the string representation of the database. +func (db *DB) String() string { + return fmt.Sprintf("DB<%q>", db.path) +} + +// Open creates and opens a database at the given path. +// If the file does not exist then it will be created automatically. +// Passing in nil options will cause Bolt to open the database with the default options. +func Open(path string, mode os.FileMode, options *Options) (*DB, error) { + db := &DB{ + opened: true, + } + // Set default options if no options are provided. + if options == nil { + options = DefaultOptions + } + db.NoSync = options.NoSync + db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags + db.NoFreelistSync = options.NoFreelistSync + db.PreLoadFreelist = options.PreLoadFreelist + db.FreelistType = options.FreelistType + db.Mlock = options.Mlock + + // Set default values for later DB operations. + db.MaxBatchSize = DefaultMaxBatchSize + db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize + + flag := os.O_RDWR + if options.ReadOnly { + flag = os.O_RDONLY + db.readOnly = true + } else { + // always load free pages in write mode + db.PreLoadFreelist = true + } + + db.openFile = options.OpenFile + if db.openFile == nil { + db.openFile = os.OpenFile + } + + // Open data file and separate sync handler for metadata writes. + var err error + if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil { + _ = db.close() + return nil, err + } + db.path = db.file.Name() + + // Lock file so that other processes using Bolt in read-write mode cannot + // use the database at the same time. This would cause corruption since + // the two processes would write meta pages and free pages separately. + // The database file is locked exclusively (only one process can grab the lock) + // if !options.ReadOnly. + // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db, !db.readOnly, options.Timeout); err != nil { + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + if db.pageSize = options.PageSize; db.pageSize == 0 { + // Set the default page size to the OS page size. 
+ db.pageSize = defaultPageSize + } + + // Initialize the database if it doesn't exist. + if info, err := db.file.Stat(); err != nil { + _ = db.close() + return nil, err + } else if info.Size() == 0 { + // Initialize new files with meta pages. + if err := db.init(); err != nil { + // clean up file descriptor on initialization fail + _ = db.close() + return nil, err + } + } else { + // try to get the page size from the metadata pages + if pgSize, err := db.getPageSize(); err == nil { + db.pageSize = pgSize + } else { + _ = db.close() + return nil, ErrInvalid + } + } + + // Initialize page pool. + db.pagePool = sync.Pool{ + New: func() interface{} { + return make([]byte, db.pageSize) + }, + } + + // Memory map the data file. + if err := db.mmap(options.InitialMmapSize); err != nil { + _ = db.close() + return nil, err + } + + if db.PreLoadFreelist { + db.loadFreelist() + } + + if db.readOnly { + return db, nil + } + + // Flush freelist when transitioning from no sync to sync so + // NoFreelistSync unaware boltdb can open the db later. + if !db.NoFreelistSync && !db.hasSyncedFreelist() { + tx, err := db.Begin(true) + if tx != nil { + err = tx.Commit() + } + if err != nil { + _ = db.close() + return nil, err + } + } + + // Mark the database as opened and return. + return db, nil +} + +// getPageSize reads the pageSize from the meta pages. It tries +// to read the first meta page firstly. If the first page is invalid, +// then it tries to read the second page using the default page size. +func (db *DB) getPageSize() (int, error) { + var ( + meta0CanRead, meta1CanRead bool + ) + + // Read the first meta page to determine the page size. + if pgSize, canRead, err := db.getPageSizeFromFirstMeta(); err != nil { + // We cannot read the page size from page 0, but can read page 0. + meta0CanRead = canRead + } else { + return pgSize, nil + } + + // Read the second meta page to determine the page size. + if pgSize, canRead, err := db.getPageSizeFromSecondMeta(); err != nil { + // We cannot read the page size from page 1, but can read page 1. + meta1CanRead = canRead + } else { + return pgSize, nil + } + + // If we can't read the page size from both pages, but can read + // either page, then we assume it's the same as the OS or the one + // given, since that's how the page size was chosen in the first place. + // + // If both pages are invalid, and (this OS uses a different page size + // from what the database was created with or the given page size is + // different from what the database was created with), then we are out + // of luck and cannot access the database. 
+ if meta0CanRead || meta1CanRead { + return db.pageSize, nil + } + + return 0, ErrInvalid +} + +// getPageSizeFromFirstMeta reads the pageSize from the first meta page +func (db *DB) getPageSizeFromFirstMeta() (int, bool, error) { + var buf [0x1000]byte + var metaCanRead bool + if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { + metaCanRead = true + if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { + return int(m.pageSize), metaCanRead, nil + } + } + return 0, metaCanRead, ErrInvalid +} + +// getPageSizeFromSecondMeta reads the pageSize from the second meta page +func (db *DB) getPageSizeFromSecondMeta() (int, bool, error) { + var ( + fileSize int64 + metaCanRead bool + ) + + // get the db file size + if info, err := db.file.Stat(); err != nil { + return 0, metaCanRead, err + } else { + fileSize = info.Size() + } + + // We need to read the second meta page, so we should skip the first page; + // but we don't know the exact page size yet, it's chicken & egg problem. + // The solution is to try all the possible page sizes, which starts from 1KB + // and until 16MB (1024<<14) or the end of the db file + // + // TODO: should we support larger page size? + for i := 0; i <= 14; i++ { + var buf [0x1000]byte + var pos int64 = 1024 << uint(i) + if pos >= fileSize-1024 { + break + } + bw, err := db.file.ReadAt(buf[:], pos) + if (err == nil && bw == len(buf)) || (err == io.EOF && int64(bw) == (fileSize-pos)) { + metaCanRead = true + if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { + return int(m.pageSize), metaCanRead, nil + } + } + } + + return 0, metaCanRead, ErrInvalid +} + +// loadFreelist reads the freelist if it is synced, or reconstructs it +// by scanning the DB if it is not synced. It assumes there are no +// concurrent accesses being made to the freelist. +func (db *DB) loadFreelist() { + db.freelistLoad.Do(func() { + db.freelist = newFreelist(db.FreelistType) + if !db.hasSyncedFreelist() { + // Reconstruct free list by scanning the DB. + db.freelist.readIDs(db.freepages()) + } else { + // Read free list from freelist page. + db.freelist.read(db.page(db.meta().freelist)) + } + db.stats.FreePageN = db.freelist.free_count() + }) +} + +func (db *DB) hasSyncedFreelist() bool { + return db.meta().freelist != pgidNoFreelist +} + +// mmap opens the underlying memory-mapped file and initializes the meta references. +// minsz is the minimum size that the new mmap can be. +func (db *DB) mmap(minsz int) (err error) { + db.mmaplock.Lock() + defer db.mmaplock.Unlock() + + info, err := db.file.Stat() + if err != nil { + return fmt.Errorf("mmap stat error: %s", err) + } else if int(info.Size()) < db.pageSize*2 { + return fmt.Errorf("file size too small") + } + + // Ensure the size is at least the minimum size. + fileSize := int(info.Size()) + var size = fileSize + if size < minsz { + size = minsz + } + size, err = db.mmapSize(size) + if err != nil { + return err + } + + if db.Mlock { + // Unlock db memory + if err := db.munlock(fileSize); err != nil { + return err + } + } + + // Dereference all mmap references before unmapping. + if db.rwtx != nil { + db.rwtx.root.dereference() + } + + // Unmap existing data before continuing. + if err = db.munmap(); err != nil { + return err + } + + // Memory-map the data file as a byte slice. + // gofail: var mapError string + // return errors.New(mapError) + if err = mmap(db, size); err != nil { + return err + } + + // Perform unmmap on any error to reset all data fields: + // dataref, data, datasz, meta0 and meta1. 
+	defer func() {
+		if err != nil {
+			if unmapErr := db.munmap(); unmapErr != nil {
+				err = fmt.Errorf("%w; rollback unmap also failed: %v", err, unmapErr)
+			}
+		}
+	}()
+
+	if db.Mlock {
+		// Don't allow swapping of data file
+		if err := db.mlock(fileSize); err != nil {
+			return err
+		}
+	}
+
+	// Save references to the meta pages.
+	db.meta0 = db.page(0).meta()
+	db.meta1 = db.page(1).meta()
+
+	// Validate the meta pages. We only return an error if both meta pages fail
+	// validation, since meta0 failing validation means that it wasn't saved
+	// properly -- but we can recover using meta1. And vice-versa.
+	err0 := db.meta0.validate()
+	err1 := db.meta1.validate()
+	if err0 != nil && err1 != nil {
+		return err0
+	}
+
+	return nil
+}
+
+func (db *DB) invalidate() {
+	db.dataref = nil
+	db.data = nil
+	db.datasz = 0
+
+	db.meta0 = nil
+	db.meta1 = nil
+}
+
+// munmap unmaps the data file from memory.
+func (db *DB) munmap() error {
+	defer db.invalidate()
+
+	// gofail: var unmapError string
+	// return errors.New(unmapError)
+	if err := munmap(db); err != nil {
+		return fmt.Errorf("unmap error: " + err.Error())
+	}
+
+	return nil
+}
+
+// mmapSize determines the appropriate size for the mmap given the current size
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
+// Returns an error if the new mmap size is greater than the max allowed.
+func (db *DB) mmapSize(size int) (int, error) {
+	// Double the size from 32KB until 1GB.
+	for i := uint(15); i <= 30; i++ {
+		if size <= 1<<i {
+			return 1 << i, nil
+		}
+	}
+
+	// Verify the requested size is not above the maximum allowed.
+	if size > maxMapSize {
+		return 0, fmt.Errorf("mmap too large")
+	}
+
+	// If larger than 1GB then grow by 1GB at a time.
+	sz := int64(size)
+	if remainder := sz % int64(maxMmapStep); remainder > 0 {
+		sz += int64(maxMmapStep) - remainder
+	}
+
+	// Ensure that the mmap size is a multiple of the page size.
+	// This should always be true since we're incrementing in MBs.
+	pageSize := int64(db.pageSize)
+	if (sz % pageSize) != 0 {
+		sz = ((sz / pageSize) + 1) * pageSize
+	}
+
+	// If we've exceeded the max size then only grow up to the max size.
+	if sz > maxMapSize {
+		sz = maxMapSize
+	}
+
+	return int(sz), nil
+}
+
+func (db *DB) munlock(fileSize int) error {
+	// gofail: var munlockError string
+	// return errors.New(munlockError)
+	if err := munlock(db, fileSize); err != nil {
+		return fmt.Errorf("munlock error: " + err.Error())
+	}
+	return nil
+}
+
+func (db *DB) mlock(fileSize int) error {
+	// gofail: var mlockError string
+	// return errors.New(mlockError)
+	if err := mlock(db, fileSize); err != nil {
+		return fmt.Errorf("mlock error: " + err.Error())
+	}
+	return nil
+}
+
+func (db *DB) mrelock(fileSizeFrom, fileSizeTo int) error {
+	if err := db.munlock(fileSizeFrom); err != nil {
+		return err
+	}
+	if err := db.mlock(fileSizeTo); err != nil {
+		return err
+	}
+	return nil
+}
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+	// Create two meta pages on a buffer.
+	buf := make([]byte, db.pageSize*4)
+	for i := 0; i < 2; i++ {
+		p := db.pageInBuffer(buf, pgid(i))
+		p.id = pgid(i)
+		p.flags = metaPageFlag
+
+		// Initialize the meta page.
+		m := p.meta()
+		m.magic = magic
+		m.version = version
+		m.pageSize = uint32(db.pageSize)
+		m.freelist = 2
+		m.root = bucket{root: 3}
+		m.pgid = 4
+		m.txid = txid(i)
+		m.checksum = m.sum64()
+	}
+
+	// Write an empty freelist at page 3.
+	p := db.pageInBuffer(buf, pgid(2))
+	p.id = pgid(2)
+	p.flags = freelistPageFlag
+	p.count = 0
+
+	// Write an empty leaf page at page 4.
+ p = db.pageInBuffer(buf, pgid(3)) + p.id = pgid(3) + p.flags = leafPageFlag + p.count = 0 + + // Write the buffer to our data file. + if _, err := db.ops.writeAt(buf, 0); err != nil { + return err + } + if err := fdatasync(db); err != nil { + return err + } + db.filesz = len(buf) + + return nil +} + +// Close releases all database resources. +// It will block waiting for any open transactions to finish +// before closing the database and returning. +func (db *DB) Close() error { + db.rwlock.Lock() + defer db.rwlock.Unlock() + + db.metalock.Lock() + defer db.metalock.Unlock() + + db.mmaplock.Lock() + defer db.mmaplock.Unlock() + + return db.close() +} + +func (db *DB) close() error { + if !db.opened { + return nil + } + + db.opened = false + + db.freelist = nil + + // Clear ops. + db.ops.writeAt = nil + + var errs []error + // Close the mmap. + if err := db.munmap(); err != nil { + errs = append(errs, err) + } + + // Close file handles. + if db.file != nil { + // No need to unlock read-only file. + if !db.readOnly { + // Unlock the file. + if err := funlock(db); err != nil { + errs = append(errs, fmt.Errorf("bolt.Close(): funlock error: %w", err)) + } + } + + // Close the file descriptor. + if err := db.file.Close(); err != nil { + errs = append(errs, fmt.Errorf("db file close: %w", err)) + } + db.file = nil + } + + db.path = "" + + if len(errs) > 0 { + return errs[0] + } + return nil +} + +// Begin starts a new transaction. +// Multiple read-only transactions can be used concurrently but only one +// write transaction can be used at a time. Starting multiple write transactions +// will cause the calls to block and be serialized until the current write +// transaction finishes. +// +// Transactions should not be dependent on one another. Opening a read +// transaction and a write transaction in the same goroutine can cause the +// writer to deadlock because the database periodically needs to re-mmap itself +// as it grows and it cannot do that while a read transaction is open. +// +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// +// IMPORTANT: You must close read-only transactions after you are finished or +// else the database will not reclaim old pages. +func (db *DB) Begin(writable bool) (*Tx, error) { + if writable { + return db.beginRWTx() + } + return db.beginTx() +} + +func (db *DB) beginTx() (*Tx, error) { + // Lock the meta pages while we initialize the transaction. We obtain + // the meta lock before the mmap lock because that's the order that the + // write transaction will obtain them. + db.metalock.Lock() + + // Obtain a read-only lock on the mmap. When the mmap is remapped it will + // obtain a write lock so all transactions must finish before it can be + // remapped. + db.mmaplock.RLock() + + // Exit if the database is not open yet. + if !db.opened { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Exit if the database is not correctly mapped. + if db.data == nil { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrInvalidMapping + } + + // Create a transaction associated with the database. + t := &Tx{} + t.init(db) + + // Keep track of transaction until it closes. + db.txs = append(db.txs, t) + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Update the transaction stats. 
+ db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN = n + db.statlock.Unlock() + + return t, nil +} + +func (db *DB) beginRWTx() (*Tx, error) { + // If the database was opened with Options.ReadOnly, return an error. + if db.readOnly { + return nil, ErrDatabaseReadOnly + } + + // Obtain writer lock. This is released by the transaction when it closes. + // This enforces only one writer transaction at a time. + db.rwlock.Lock() + + // Once we have the writer lock then we can lock the meta pages so that + // we can set up the transaction. + db.metalock.Lock() + defer db.metalock.Unlock() + + // Exit if the database is not open yet. + if !db.opened { + db.rwlock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Exit if the database is not correctly mapped. + if db.data == nil { + db.rwlock.Unlock() + return nil, ErrInvalidMapping + } + + // Create a transaction associated with the database. + t := &Tx{writable: true} + t.init(db) + db.rwtx = t + db.freePages() + return t, nil +} + +// freePages releases any pages associated with closed read-only transactions. +func (db *DB) freePages() { + // Free all pending pages prior to earliest open transaction. + sort.Sort(txsById(db.txs)) + minid := txid(0xFFFFFFFFFFFFFFFF) + if len(db.txs) > 0 { + minid = db.txs[0].meta.txid + } + if minid > 0 { + db.freelist.release(minid - 1) + } + // Release unused txid extents. + for _, t := range db.txs { + db.freelist.releaseRange(minid, t.meta.txid-1) + minid = t.meta.txid + 1 + } + db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) + // Any page both allocated and freed in an extent is safe to release. +} + +type txsById []*Tx + +func (t txsById) Len() int { return len(t) } +func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } + +// removeTx removes a transaction from the database. +func (db *DB) removeTx(tx *Tx) { + // Release the read lock on the mmap. + db.mmaplock.RUnlock() + + // Use the meta lock to restrict access to the DB object. + db.metalock.Lock() + + // Remove the transaction. + for i, t := range db.txs { + if t == tx { + last := len(db.txs) - 1 + db.txs[i] = db.txs[last] + db.txs[last] = nil + db.txs = db.txs[:last] + break + } + } + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Merge statistics. + db.statlock.Lock() + db.stats.OpenTxN = n + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() +} + +// Update executes a function within the context of a read-write managed transaction. +// If no error is returned from the function then the transaction is committed. +// If an error is returned then the entire transaction is rolled back. +// Any error that is returned from the function or returned from the commit is +// returned from the Update() method. +// +// Attempting to manually commit or rollback within the function will cause a panic. +func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. 
+// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Rollback() +} + +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + c.err <- err + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. 
+var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + +// Sync executes fdatasync() against the database file handle. +// +// This is not necessary under normal operation, however, if you use NoSync +// then it allows you to force the database file to sync against the disk. +func (db *DB) Sync() error { return fdatasync(db) } + +// Stats retrieves ongoing performance stats for the database. +// This is only updated when a transaction closes. +func (db *DB) Stats() Stats { + db.statlock.RLock() + defer db.statlock.RUnlock() + return db.stats +} + +// This is for internal access to the raw data bytes from the C cursor, use +// carefully, or not at all. +func (db *DB) Info() *Info { + _assert(db.data != nil, "database file isn't correctly mapped") + return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} +} + +// page retrieves a page reference from the mmap based on the current page size. +func (db *DB) page(id pgid) *page { + pos := id * pgid(db.pageSize) + return (*page)(unsafe.Pointer(&db.data[pos])) +} + +// pageInBuffer retrieves a page reference from a given byte array based on the current page size. +func (db *DB) pageInBuffer(b []byte, id pgid) *page { + return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +} + +// meta retrieves the current meta page reference. +func (db *DB) meta() *meta { + // We have to return the meta with the highest txid which doesn't fail + // validation. Otherwise, we can cause errors when in fact the database is + // in a consistent state. metaA is the one with the higher txid. + metaA := db.meta0 + metaB := db.meta1 + if db.meta1.txid > db.meta0.txid { + metaA = db.meta1 + metaB = db.meta0 + } + + // Use higher meta page if valid. Otherwise, fallback to previous, if valid. + if err := metaA.validate(); err == nil { + return metaA + } else if err := metaB.validate(); err == nil { + return metaB + } + + // This should never be reached, because both meta1 and meta0 were validated + // on mmap() and we do fsync() on every write. + panic("bolt.DB.meta(): invalid meta pages") +} + +// allocate returns a contiguous block of memory starting at a given page. +func (db *DB) allocate(txid txid, count int) (*page, error) { + // Allocate a temporary buffer for the page. + var buf []byte + if count == 1 { + buf = db.pagePool.Get().([]byte) + } else { + buf = make([]byte, count*db.pageSize) + } + p := (*page)(unsafe.Pointer(&buf[0])) + p.overflow = uint32(count - 1) + + // Use pages from the freelist if they are available. + if p.id = db.freelist.allocate(txid, count); p.id != 0 { + return p, nil + } + + // Resize mmap() if we're at the end. + p.id = db.rwtx.meta.pgid + var minsz = int((p.id+pgid(count))+1) * db.pageSize + if minsz >= db.datasz { + if err := db.mmap(minsz); err != nil { + return nil, fmt.Errorf("mmap allocate error: %s", err) + } + } + + // Move the page id high water mark. + db.rwtx.meta.pgid += pgid(count) + + return p, nil +} + +// grow grows the size of the database to the given sz. +func (db *DB) grow(sz int) error { + // Ignore if the new size is less than available file size. 
+ if sz <= db.filesz { + return nil + } + + // If the data is smaller than the alloc size then only allocate what's needed. + // Once it goes over the allocation size then allocate in chunks. + if db.datasz <= db.AllocSize { + sz = db.datasz + } else { + sz += db.AllocSize + } + + // Truncate and fsync to ensure file size metadata is flushed. + // https://github.com/boltdb/bolt/issues/284 + if !db.NoGrowSync && !db.readOnly { + if runtime.GOOS != "windows" { + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("file resize error: %s", err) + } + } + if err := db.file.Sync(); err != nil { + return fmt.Errorf("file sync error: %s", err) + } + if db.Mlock { + // unlock old file and lock new one + if err := db.mrelock(db.filesz, sz); err != nil { + return fmt.Errorf("mlock/munlock error: %s", err) + } + } + } + + db.filesz = sz + return nil +} + +func (db *DB) IsReadOnly() bool { + return db.readOnly +} + +func (db *DB) freepages() []pgid { + tx, err := db.beginTx() + defer func() { + err = tx.Rollback() + if err != nil { + panic("freepages: failed to rollback tx") + } + }() + if err != nil { + panic("freepages: failed to open read only tx") + } + + reachable := make(map[pgid]*page) + nofreed := make(map[pgid]bool) + ech := make(chan error) + go func() { + for e := range ech { + panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) + } + }() + tx.checkBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech) + close(ech) + + // TODO: If check bucket reported any corruptions (ech) we shouldn't proceed to freeing the pages. + + var fids []pgid + for i := pgid(2); i < db.meta().pgid; i++ { + if _, ok := reachable[i]; !ok { + fids = append(fids, i) + } + } + return fids +} + +// Options represents the options that can be set when opening a database. +type Options struct { + // Timeout is the amount of time to wait to obtain a file lock. + // When set to zero it will wait indefinitely. This option is only + // available on Darwin and Linux. + Timeout time.Duration + + // Sets the DB.NoGrowSync flag before memory mapping the file. + NoGrowSync bool + + // Do not sync freelist to disk. This improves the database write performance + // under normal operation, but requires a full database re-sync during recovery. + NoFreelistSync bool + + // PreLoadFreelist sets whether to load the free pages when opening + // the db file. Note when opening db in write mode, bbolt will always + // load the free pages. + PreLoadFreelist bool + + // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures + // dramatic performance degradation if database is large and fragmentation in freelist is common. + // The alternative one is using hashmap, it is faster in almost all circumstances + // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. + // The default type is array + FreelistType FreelistType + + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to + // grab a shared lock (UNIX). + ReadOnly bool + + // Sets the DB.MmapFlags flag before memory mapping the file. + MmapFlags int + + // InitialMmapSize is the initial mmap size of the database + // in bytes. Read transactions won't block write transaction + // if the InitialMmapSize is large enough to hold database mmap + // size. (See DB.Begin for more information) + // + // If <=0, the initial map size is 0. + // If initialMmapSize is smaller than the previous database size, + // it takes no effect. 
+ InitialMmapSize int + + // PageSize overrides the default OS page size. + PageSize int + + // NoSync sets the initial value of DB.NoSync. Normally this can just be + // set directly on the DB itself when returned from Open(), but this option + // is useful in APIs which expose Options but not the underlying DB. + NoSync bool + + // OpenFile is used to open files. It defaults to os.OpenFile. This option + // is useful for writing hermetic tests. + OpenFile func(string, int, os.FileMode) (*os.File, error) + + // Mlock locks database file in memory when set to true. + // It prevents potential page faults, however + // used memory can't be reclaimed. (UNIX only) + Mlock bool +} + +// DefaultOptions represent the options used if nil options are passed into Open(). +// No timeout is used which will cause Bolt to wait indefinitely for a lock. +var DefaultOptions = &Options{ + Timeout: 0, + NoGrowSync: false, + FreelistType: FreelistArrayType, +} + +// Stats represents statistics about the database. +type Stats struct { + // Put `TxStats` at the first field to ensure it's 64-bit aligned. Note + // that the first word in an allocated struct can be relied upon to be + // 64-bit aligned. Refer to https://pkg.go.dev/sync/atomic#pkg-note-BUG. + // Also refer to discussion in https://github.com/etcd-io/bbolt/issues/577. + TxStats TxStats // global, ongoing stats. + + // Freelist stats + FreePageN int // total number of free pages on the freelist + PendingPageN int // total number of pending pages on the freelist + FreeAlloc int // total bytes allocated in free pages + FreelistInuse int // total bytes used by the freelist + + // Transaction stats + TxN int // total number of started read transactions + OpenTxN int // number of currently open read transactions +} + +// Sub calculates and returns the difference between two sets of database stats. +// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. +func (s *Stats) Sub(other *Stats) Stats { + if other == nil { + return *s + } + var diff Stats + diff.FreePageN = s.FreePageN + diff.PendingPageN = s.PendingPageN + diff.FreeAlloc = s.FreeAlloc + diff.FreelistInuse = s.FreelistInuse + diff.TxN = s.TxN - other.TxN + diff.TxStats = s.TxStats.Sub(&other.TxStats) + return diff +} + +type Info struct { + Data uintptr + PageSize int +} + +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *meta) validate() error { + if m.magic != magic { + return ErrInvalid + } else if m.version != version { + return ErrVersionMismatch + } else if m.checksum != m.sum64() { + return ErrChecksum + } + return nil +} + +// copy copies one meta object to another. +func (m *meta) copy(dest *meta) { + *dest = *m +} + +// write writes the meta onto a page. +func (m *meta) write(p *page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. 
+ p.id = pgid(m.txid % 2) + p.flags |= metaPageFlag + + // Calculate the checksum. + m.checksum = m.sum64() + + m.copy(p.meta()) +} + +// generates the checksum for the meta. +func (m *meta) sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +// _assert will panic with a given formatted message if the given condition is false. +func _assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/doc.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/doc.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/doc.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,40 @@ +/* +package bbolt implements a low-level key/value store in pure Go. It supports +fully serializable transactions, ACID semantics, and lock-free MVCC with +multiple readers and a single writer. Bolt can be used for projects that +want a simple data store without the need to add large dependencies such as +Postgres or MySQL. + +Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is +optimized for fast read access and does not require recovery in the event of a +system crash. Transactions which have not finished committing will simply be +rolled back in the event of a crash. + +The design of Bolt is based on Howard Chu's LMDB database project. + +Bolt currently works on Windows, Mac OS X, and Linux. + +# Basics + +There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is +a collection of buckets and is represented by a single file on disk. A bucket is +a collection of unique keys that are associated with values. + +Transactions provide either read-only or read-write access to the database. +Read-only transactions can retrieve key/value pairs and can use Cursors to +iterate over the dataset sequentially. Read-write transactions can create and +delete buckets and can insert and remove keys. Only one read-write transaction +is allowed at a time. + +# Caveats + +The database uses a read-only, memory-mapped data file to ensure that +applications cannot corrupt the database, however, this means that keys and +values returned from Bolt cannot be changed. Writing to a read-only byte slice +will cause Go to panic. + +Keys and values retrieved from the database are only valid for the life of +the transaction. When used outside the transaction, these byte slices can +point to different data or can point to invalid memory which will cause a panic. +*/ +package bbolt diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/errors.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/errors.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/errors.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/errors.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,78 @@ +package bbolt + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrDatabaseOpen is returned when opening a database that is + // already open. + ErrDatabaseOpen = errors.New("database already open") + + // ErrInvalid is returned when both meta pages on a database are invalid. 
+ // This typically occurs when a file is not a bolt database. + ErrInvalid = errors.New("invalid database") + + // ErrInvalidMapping is returned when the database file fails to get mapped. + ErrInvalidMapping = errors.New("database isn't correctly mapped") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when either meta page checksum does not match. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") + + // ErrFreePagesNotLoaded is returned when a readonly transaction without + // preloading the free pages is trying to access the free pages. + ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. + ErrValueTooLarge = errors.New("value too large") + + // ErrIncompatibleValue is returned when trying create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. + ErrIncompatibleValue = errors.New("incompatible value") +) diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/freelist.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/freelist.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/freelist.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/freelist.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,405 @@ +package bbolt + +import ( + "fmt" + "sort" + "unsafe" +) + +// txPending holds a list of pgids and corresponding allocation txns +// that are pending to be freed. +type txPending struct { + ids []pgid + alloctx []txid // txids allocating the ids + lastReleaseBegin txid // beginning txid of last matching releaseRange +} + +// pidSet holds the set of starting pgids which have the same span size +type pidSet map[pgid]struct{} + +// freelist represents a list of all pages that are available for allocation. 
+// It also tracks pages that have been freed but are still in use by open transactions. +type freelist struct { + freelistType FreelistType // freelist type + ids []pgid // all free and available free page ids. + allocs map[pgid]txid // mapping of txid that allocated a pgid. + pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. + cache map[pgid]struct{} // fast lookup of all free and pending page ids. + freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size + forwardMap map[pgid]uint64 // key is start pgid, value is its span size + backwardMap map[pgid]uint64 // key is end pgid, value is its span size + allocate func(txid txid, n int) pgid // the freelist allocate func + free_count func() int // the function which gives you free page number + mergeSpans func(ids pgids) // the mergeSpan func + getFreePageIDs func() []pgid // get free pgids func + readIDs func(pgids []pgid) // readIDs func reads list of pages and init the freelist +} + +// newFreelist returns an empty, initialized freelist. +func newFreelist(freelistType FreelistType) *freelist { + f := &freelist{ + freelistType: freelistType, + allocs: make(map[pgid]txid), + pending: make(map[txid]*txPending), + cache: make(map[pgid]struct{}), + freemaps: make(map[uint64]pidSet), + forwardMap: make(map[pgid]uint64), + backwardMap: make(map[pgid]uint64), + } + + if freelistType == FreelistMapType { + f.allocate = f.hashmapAllocate + f.free_count = f.hashmapFreeCount + f.mergeSpans = f.hashmapMergeSpans + f.getFreePageIDs = f.hashmapGetFreePageIDs + f.readIDs = f.hashmapReadIDs + } else { + f.allocate = f.arrayAllocate + f.free_count = f.arrayFreeCount + f.mergeSpans = f.arrayMergeSpans + f.getFreePageIDs = f.arrayGetFreePageIDs + f.readIDs = f.arrayReadIDs + } + + return f +} + +// size returns the size of the page after serialization. +func (f *freelist) size() int { + n := f.count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. + n++ + } + return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n) +} + +// count returns count of pages on the freelist +func (f *freelist) count() int { + return f.free_count() + f.pending_count() +} + +// arrayFreeCount returns count of free pages(array version) +func (f *freelist) arrayFreeCount() int { + return len(f.ids) +} + +// pending_count returns count of pending pages +func (f *freelist) pending_count() int { + var count int + for _, txp := range f.pending { + count += len(txp.ids) + } + return count +} + +// copyall copies a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (f *freelist) copyall(dst []pgid) { + m := make(pgids, 0, f.pending_count()) + for _, txp := range f.pending { + m = append(m, txp.ids...) + } + sort.Sort(m) + mergepgids(dst, f.getFreePageIDs(), m) +} + +// arrayAllocate returns the starting page id of a contiguous list of pages of a given size. +// If a contiguous block cannot be found then 0 is returned. +func (f *freelist) arrayAllocate(txid txid, n int) pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. 
+ if (id-initial)+1 == pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. + for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, initial+i) + } + f.allocs[initial] = txid + return initial + } + + previd = id + } + return 0 +} + +// free releases a page and its overflow for a given transaction id. +// If the page is already free then a panic will occur. +func (f *freelist) free(txid txid, p *page) { + if p.id <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) + } + + // Free page and all its overflow pages. + txp := f.pending[txid] + if txp == nil { + txp = &txPending{} + f.pending[txid] = txp + } + allocTxid, ok := f.allocs[p.id] + if ok { + delete(f.allocs, p.id) + } else if (p.flags & freelistPageFlag) != 0 { + // Freelist is always allocated by prior tx. + allocTxid = txid - 1 + } + + for id := p.id; id <= p.id+pgid(p.overflow); id++ { + // Verify that page is not already free. + if _, ok := f.cache[id]; ok { + panic(fmt.Sprintf("page %d already freed", id)) + } + // Add to the freelist and cache. + txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) + f.cache[id] = struct{}{} + } +} + +// release moves all page ids for a transaction id (or older) to the freelist. +func (f *freelist) release(txid txid) { + m := make(pgids, 0) + for tid, txp := range f.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, txp.ids...) + delete(f.pending, tid) + } + } + f.mergeSpans(m) +} + +// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. +func (f *freelist) releaseRange(begin, end txid) { + if begin > end { + return + } + var m pgids + for tid, txp := range f.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. + if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { + delete(f.pending, tid) + } + } + f.mergeSpans(m) +} + +// rollback removes the pages from a given pending tx. +func (f *freelist) rollback(txid txid) { + // Remove page ids from cache. + txp := f.pending[txid] + if txp == nil { + return + } + var m pgids + for i, pgid := range txp.ids { + delete(f.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + f.allocs[pgid] = tx + } else { + // Freed page was allocated by this txn; OK to throw away. + m = append(m, pgid) + } + } + // Remove pages from pending list and mark as free if allocated by txid. + delete(f.pending, txid) + f.mergeSpans(m) +} + +// freed returns whether a given page is in the free list. 
+func (f *freelist) freed(pgId pgid) bool { + _, ok := f.cache[pgId] + return ok +} + +// read initializes the freelist from a freelist page. +func (f *freelist) read(p *page) { + if (p.flags & freelistPageFlag) == 0 { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) + } + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + var idx, count = 0, int(p.count) + if count == 0xFFFF { + idx = 1 + c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) + count = int(c) + if count < 0 { + panic(fmt.Sprintf("leading element count %d overflows int", c)) + } + } + + // Copy the list of page ids from the freelist. + if count == 0 { + f.ids = nil + } else { + var ids []pgid + data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx) + unsafeSlice(unsafe.Pointer(&ids), data, count) + + // copy the ids, so we don't modify on the freelist page directly + idsCopy := make([]pgid, count) + copy(idsCopy, ids) + // Make sure they're sorted. + sort.Sort(pgids(idsCopy)) + + f.readIDs(idsCopy) + } +} + +// arrayReadIDs initializes the freelist from a given list of ids. +func (f *freelist) arrayReadIDs(ids []pgid) { + f.ids = ids + f.reindex() +} + +func (f *freelist) arrayGetFreePageIDs() []pgid { + return f.ids +} + +// write writes the page ids onto a freelist page. All free and pending ids are +// saved to disk since in the event of a program crash, all pending ids will +// become free. +func (f *freelist) write(p *page) error { + // Combine the old free pgids and pgids waiting on an open transaction. + + // Update the header flag. + p.flags |= freelistPageFlag + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. + l := f.count() + if l == 0 { + p.count = uint16(l) + } else if l < 0xFFFF { + p.count = uint16(l) + var ids []pgid + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&ids), data, l) + f.copyall(ids) + } else { + p.count = 0xFFFF + var ids []pgid + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&ids), data, l+1) + ids[0] = pgid(l) + f.copyall(ids[1:]) + } + + return nil +} + +// reload reads the freelist from a page and filters out pending items. +func (f *freelist) reload(p *page) { + f.read(p) + + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, txp := range f.pending { + for _, pendingID := range txp.ids { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []pgid + for _, id := range f.getFreePageIDs() { + if !pcache[id] { + a = append(a, id) + } + } + + f.readIDs(a) +} + +// noSyncReload reads the freelist from pgids and filters out pending items. +func (f *freelist) noSyncReload(pgids []pgid) { + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, txp := range f.pending { + for _, pendingID := range txp.ids { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []pgid + for _, id := range pgids { + if !pcache[id] { + a = append(a, id) + } + } + + f.readIDs(a) +} + +// reindex rebuilds the free cache based on available and pending free lists. 
+func (f *freelist) reindex() { + ids := f.getFreePageIDs() + f.cache = make(map[pgid]struct{}, len(ids)) + for _, id := range ids { + f.cache[id] = struct{}{} + } + for _, txp := range f.pending { + for _, pendingID := range txp.ids { + f.cache[pendingID] = struct{}{} + } + } +} + +// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array +func (f *freelist) arrayMergeSpans(ids pgids) { + sort.Sort(ids) + f.ids = pgids(f.ids).merge(ids) +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/freelist_hmap.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/freelist_hmap.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/freelist_hmap.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/freelist_hmap.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,178 @@ +package bbolt + +import "sort" + +// hashmapFreeCount returns count of free pages(hashmap version) +func (f *freelist) hashmapFreeCount() int { + // use the forwardMap to get the total count + count := 0 + for _, size := range f.forwardMap { + count += int(size) + } + return count +} + +// hashmapAllocate serves the same purpose as arrayAllocate, but use hashmap as backend +func (f *freelist) hashmapAllocate(txid txid, n int) pgid { + if n == 0 { + return 0 + } + + // if we have a exact size match just return short path + if bm, ok := f.freemaps[uint64(n)]; ok { + for pid := range bm { + // remove the span + f.delSpan(pid, uint64(n)) + + f.allocs[pid] = txid + + for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, pid+i) + } + return pid + } + } + + // lookup the map to find larger span + for size, bm := range f.freemaps { + if size < uint64(n) { + continue + } + + for pid := range bm { + // remove the initial + f.delSpan(pid, size) + + f.allocs[pid] = txid + + remain := size - uint64(n) + + // add remain span + f.addSpan(pid+pgid(n), remain) + + for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, pid+i) + } + return pid + } + } + + return 0 +} + +// hashmapReadIDs reads pgids as input an initial the freelist(hashmap version) +func (f *freelist) hashmapReadIDs(pgids []pgid) { + f.init(pgids) + + // Rebuild the page cache. 
+ f.reindex() +} + +// hashmapGetFreePageIDs returns the sorted free page ids +func (f *freelist) hashmapGetFreePageIDs() []pgid { + count := f.free_count() + if count == 0 { + return nil + } + + m := make([]pgid, 0, count) + for start, size := range f.forwardMap { + for i := 0; i < int(size); i++ { + m = append(m, start+pgid(i)) + } + } + sort.Sort(pgids(m)) + + return m +} + +// hashmapMergeSpans try to merge list of pages(represented by pgids) with existing spans +func (f *freelist) hashmapMergeSpans(ids pgids) { + for _, id := range ids { + // try to see if we can merge and update + f.mergeWithExistingSpan(id) + } +} + +// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward +func (f *freelist) mergeWithExistingSpan(pid pgid) { + prev := pid - 1 + next := pid + 1 + + preSize, mergeWithPrev := f.backwardMap[prev] + nextSize, mergeWithNext := f.forwardMap[next] + newStart := pid + newSize := uint64(1) + + if mergeWithPrev { + //merge with previous span + start := prev + 1 - pgid(preSize) + f.delSpan(start, preSize) + + newStart -= pgid(preSize) + newSize += preSize + } + + if mergeWithNext { + // merge with next span + f.delSpan(next, nextSize) + newSize += nextSize + } + + f.addSpan(newStart, newSize) +} + +func (f *freelist) addSpan(start pgid, size uint64) { + f.backwardMap[start-1+pgid(size)] = size + f.forwardMap[start] = size + if _, ok := f.freemaps[size]; !ok { + f.freemaps[size] = make(map[pgid]struct{}) + } + + f.freemaps[size][start] = struct{}{} +} + +func (f *freelist) delSpan(start pgid, size uint64) { + delete(f.forwardMap, start) + delete(f.backwardMap, start+pgid(size-1)) + delete(f.freemaps[size], start) + if len(f.freemaps[size]) == 0 { + delete(f.freemaps, size) + } +} + +// initial from pgids using when use hashmap version +// pgids must be sorted +func (f *freelist) init(pgids []pgid) { + if len(pgids) == 0 { + return + } + + size := uint64(1) + start := pgids[0] + + if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { + panic("pgids not sorted") + } + + f.freemaps = make(map[uint64]pidSet) + f.forwardMap = make(map[pgid]uint64) + f.backwardMap = make(map[pgid]uint64) + + for i := 1; i < len(pgids); i++ { + // continuous page + if pgids[i] == pgids[i-1]+1 { + size++ + } else { + f.addSpan(start, size) + + size = 1 + start = pgids[i] + } + } + + // init the tail + if size != 0 && start != 0 { + f.addSpan(start, size) + } +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/mlock_unix.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/mlock_unix.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/mlock_unix.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/mlock_unix.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,37 @@ +//go:build !windows +// +build !windows + +package bbolt + +import "golang.org/x/sys/unix" + +// mlock locks memory of db file +func mlock(db *DB, fileSize int) error { + sizeToLock := fileSize + if sizeToLock > db.datasz { + // Can't lock more than mmaped slice + sizeToLock = db.datasz + } + if err := unix.Mlock(db.dataref[:sizeToLock]); err != nil { + return err + } + return nil +} + +// munlock unlocks memory of db file +func munlock(db *DB, fileSize int) error { + if db.dataref == nil { + return nil + } + + sizeToUnlock := fileSize + if sizeToUnlock > db.datasz { + // Can't unlock more than mmaped slice + sizeToUnlock = db.datasz + } + + if err := unix.Munlock(db.dataref[:sizeToUnlock]); err != nil { + return err + } + return nil +} diff 
-Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/mlock_windows.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/mlock_windows.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/mlock_windows.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/mlock_windows.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,11 @@ +package bbolt + +// mlock locks memory of db file +func mlock(_ *DB, _ int) error { + panic("mlock is supported only on UNIX systems") +} + +// munlock unlocks memory of db file +func munlock(_ *DB, _ int) error { + panic("munlock is supported only on UNIX systems") +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/node.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/node.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/node.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/node.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,610 @@ +package bbolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. +func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. +func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + } + return int(sz) +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v uintptr) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() uintptr { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. +func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. +func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. 
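
The mlock()/munlock() helpers above back the public Options.Mlock flag, assuming this vendored copy exposes it the way upstream bbolt does; if that assumption holds, pinning the database file in memory would look roughly like this:

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Mlock asks bbolt to pin the mapped database file in RAM via the
	// mlock() helper above; the Windows stub panics, so this is a
	// Unix-only option. "my.db" is just an example path.
	db, err := bolt.Open("my.db", 0600, &bolt.Options{Mlock: true})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
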
+func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. +func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { + if pgId >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgId + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. +func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +// The page should have p.id (might be 0 for meta or bucket-inline page) and p.overflow set +// and the rest should be zeroed. +func (n *node) write(p *page) { + _assert(p.count == 0 && p.flags == 0, "node cannot be written into a not empty page") + + // Initialize page. + if n.isLeaf { + p.flags = leafPageFlag + } else { + p.flags = branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Stop here if there are no items to write. + if p.count == 0 { + return + } + + // Loop over each item and write it to the page. + // off tracks the offset into p of the start of the next data. + off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Create a slice to write into of needed size and advance + // byte pointer for next iteration. 
+ sz := len(item.key) + len(item.value) + b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz) + off += uintptr(sz) + + // Write the page element. + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // Write data for the element to the end of the page. + l := copy(b, item.key) + copy(b[l:], item.value) + } + + // DEBUG ONLY: n.dump() +} + +// split breaks up a node into multiple smaller nodes, if appropriate. +// This should only be called from the spill() function. +func (n *node) split(pageSize uintptr) []*node { + var nodes []*node + + node := n + for { + // Split node into two. + a, b := node.splitTwo(pageSize) + nodes = append(nodes, a) + + // If we can't split then exit the loop. + if b == nil { + break + } + + // Set node to b so it gets split on the next iteration. + node = b + } + + return nodes +} + +// splitTwo breaks up a node into two smaller nodes, if appropriate. +// This should only be called from the split() function. +func (n *node) splitTwo(pageSize uintptr) (*node, *node) { + // Ignore the split if the page doesn't have at least enough nodes for + // two pages or if the nodes can fit in a single page. + if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + return n, nil + } + + // Determine the threshold before starting a new node. + var fillPercent = n.bucket.FillPercent + if fillPercent < minFillPercent { + fillPercent = minFillPercent + } else if fillPercent > maxFillPercent { + fillPercent = maxFillPercent + } + threshold := int(float64(pageSize) * fillPercent) + + // Determine split position and sizes of the two pages. + splitIndex, _ := n.splitIndex(threshold) + + // Split node into two separate nodes. + // If there's no parent then we'll need to create one. + if n.parent == nil { + n.parent = &node{bucket: n.bucket, children: []*node{n}} + } + + // Create a new node and add it to the parent. + next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} + n.parent.children = append(n.parent.children, next) + + // Split inodes across two nodes. + next.inodes = n.inodes[splitIndex:] + n.inodes = n.inodes[:splitIndex] + + // Update the statistics. + n.bucket.tx.stats.IncSplit(1) + + return n, next +} + +// splitIndex finds the position where a page will fill a given threshold. +// It returns the index as well as the size of the first page. +// This is only be called from split(). +func (n *node) splitIndex(threshold int) (index, sz uintptr) { + sz = pageHeaderSize + + // Loop until we only have the minimum number of keys required for the second page. + for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + index = uintptr(i) + inode := n.inodes[i] + elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value)) + + // If we have at least the minimum number of keys and adding another + // node would put us over the threshold then exit and return. + if index >= minKeysPerPage && sz+elsize > uintptr(threshold) { + break + } + + // Add the element size to the total size. + sz += elsize + } + + return +} + +// spill writes the nodes to dirty pages and splits nodes as it goes. 
+// Returns an error if dirty pages cannot be allocated. +func (n *node) spill() error { + var tx = n.bucket.tx + if n.spilled { + return nil + } + + // Spill child nodes first. Child nodes can materialize sibling nodes in + // the case of split-merge so we cannot use a range loop. We have to check + // the children size on every loop iteration. + sort.Sort(n.children) + for i := 0; i < len(n.children); i++ { + if err := n.children[i].spill(); err != nil { + return err + } + } + + // We no longer need the child list because it's only used for spill tracking. + n.children = nil + + // Split nodes into appropriate sizes. The first node will always be n. + var nodes = n.split(uintptr(tx.db.pageSize)) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. + if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. + if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.IncSpill(1) + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. + if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.IncRebalance(1) + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. + child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If both this node and the target node are too small then merge them. 
+ if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) + n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. +func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.IncNodeDeref(1) +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. + var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. + for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +func compareKeys(left, right []byte) int { + return bytes.Compare(left, right) +} + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { + return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 +} + +// inode represents an internal node inside of a node. 
+// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/page.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/page.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/page.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/page.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,214 @@ +package bbolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = unsafe.Sizeof(page{}) + +const minKeysPerPage = 2 + +const branchPageElementSize = unsafe.Sizeof(branchPageElement{}) +const leafPageElementSize = unsafe.Sizeof(leafPageElement{}) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 +} + +// typ returns a human readable page type string used for debugging. +func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. +func (p *page) meta() *meta { + return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) +} + +func (p *page) fastCheck(id pgid) { + _assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id) + // Only one flag of page-type can be set. + _assert(p.flags == branchPageFlag || + p.flags == leafPageFlag || + p.flags == metaPageFlag || + p.flags == freelistPageFlag, + "page %v: has unexpected type/flags: %x", p.id, p.flags) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + leafPageElementSize, int(index))) +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + var elems []leafPageElement + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) + return elems +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + unsafe.Sizeof(branchPageElement{}), int(index))) +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + var elems []branchPageElement + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) + return elems +} + +// dump writes n bytes of the page to STDERR as hex output. +func (p *page) hexdump(n int) { + buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n) + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. 
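
page.meta(), leafPageElement() and the other accessors above all work by casting an offset into the mmapped file to a Go struct pointer. A tiny self-contained illustration of that overlay pattern, with a made-up pageHdr type standing in for the real page struct:

package main

import (
	"fmt"
	"unsafe"
)

// pageHdr mirrors the shape of the vendored page header (id, flags, count,
// overflow) purely for illustration; the real type is the one in the diff.
type pageHdr struct {
	id       uint64
	flags    uint16
	count    uint16
	overflow uint32
}

func main() {
	const leafFlag = 0x02 // same value as the vendored leafPageFlag

	// Overlay the header on a raw buffer with the same pointer-cast pattern
	// that page.meta() and leafPageElement() apply to the mmapped file.
	buf := make([]byte, 4096)
	h := (*pageHdr)(unsafe.Pointer(&buf[0]))
	h.id, h.flags, h.count = 42, leafFlag, 3

	fmt.Printf("id=%d type=%#x count=%d header=%d bytes\n",
		h.id, h.flags, h.count, unsafe.Sizeof(*h))
}
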
+type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. +func (n *branchPageElement) key() []byte { + return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + i := int(n.pos) + j := i + int(n.ksize) + return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +// value returns a byte slice of the node value. +func (n *leafPageElement) value() []byte { + i := int(n.pos) + int(n.ksize) + j := i + int(n.vsize) + return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + merged := make(pgids, len(a)+len(b)) + mergepgids(merged, a, b) + return merged +} + +// mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func mergepgids(dst, a, b pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + _ = append(merged, follow...) +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/tx.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/tx.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/tx.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/tx.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,797 @@ +package bbolt + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "sync/atomic" + "time" + "unsafe" +) + +// txid represents the internal transaction identifier. +type txid uint64 + +// Tx represents a read-only or read/write transaction on the database. +// Read-only transactions can be used for retrieving values for keys and creating cursors. +// Read/write transactions can create and remove buckets and create and remove keys. +// +// IMPORTANT: You must commit or rollback transactions when you are done with +// them. Pages can not be reclaimed by the writer until no more transactions +// are using them. A long running read transaction can cause the database to +// quickly grow. 
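
The warning in the Tx comment above about always committing or rolling back is easiest to satisfy through the managed helpers on DB, which belong to the standard bbolt surface rather than this file; a typical usage sketch, with made-up database, bucket and key names:

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil) // example path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Update runs fn in a writable Tx and commits it, or rolls back on error.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets")) // example bucket
		if err != nil {
			return err
		}
		return b.Put([]byte("answer"), []byte("42"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// View runs fn in a read-only Tx and always rolls it back afterwards.
	_ = db.View(func(tx *bolt.Tx) error {
		fmt.Printf("answer=%s\n", tx.Bucket([]byte("widgets")).Get([]byte("answer")))
		return nil
	})
}
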
+type Tx struct { + writable bool + managed bool + db *DB + meta *meta + root Bucket + pages map[pgid]*page + stats TxStats + commitHandlers []func() + + // WriteFlag specifies the flag for write-related methods like WriteTo(). + // Tx opens the database file with the specified flag to copy the data. + // + // By default, the flag is unset, which works well for mostly in-memory + // workloads. For databases that are much larger than available RAM, + // set the flag to syscall.O_DIRECT to avoid trashing the page cache. + WriteFlag int +} + +// init initializes the transaction. +func (tx *Tx) init(db *DB) { + tx.db = db + tx.pages = nil + + // Copy the meta page since it can be changed by the writer. + tx.meta = &meta{} + db.meta().copy(tx.meta) + + // Copy over the root bucket. + tx.root = newBucket(tx) + tx.root.bucket = &bucket{} + *tx.root.bucket = tx.meta.root + + // Increment the transaction id and add a page cache for writable transactions. + if tx.writable { + tx.pages = make(map[pgid]*page) + tx.meta.txid += txid(1) + } +} + +// ID returns the transaction id. +func (tx *Tx) ID() int { + return int(tx.meta.txid) +} + +// DB returns a reference to the database that created the transaction. +func (tx *Tx) DB() *DB { + return tx.db +} + +// Size returns current database size in bytes as seen by this transaction. +func (tx *Tx) Size() int64 { + return int64(tx.meta.pgid) * int64(tx.db.pageSize) +} + +// Writable returns whether the transaction can perform write operations. +func (tx *Tx) Writable() bool { + return tx.writable +} + +// Cursor creates a cursor associated with the root bucket. +// All items in the cursor will return a nil value because all root bucket keys point to buckets. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (tx *Tx) Cursor() *Cursor { + return tx.root.Cursor() +} + +// Stats retrieves a copy of the current transaction statistics. +func (tx *Tx) Stats() TxStats { + return tx.stats +} + +// Bucket retrieves a bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) Bucket(name []byte) *Bucket { + return tx.root.Bucket(name) +} + +// CreateBucket creates a new bucket. +// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { + return tx.root.CreateBucket(name) +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { + return tx.root.CreateBucketIfNotExists(name) +} + +// DeleteBucket deletes a bucket. +// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. +func (tx *Tx) DeleteBucket(name []byte) error { + return tx.root.DeleteBucket(name) +} + +// ForEach executes a function for each bucket in the root. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. 
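
Cursor() above hands back the same cursor type every bucket exposes; a short key-ordered scan using the usual public API (the "widgets" bucket name is an assumption) might look like this:

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

// scanWidgets iterates a bucket in key order using a cursor inside a
// read-only transaction; the bucket name is an example.
func scanWidgets(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets"))
		if b == nil {
			return nil // bucket not created yet
		}
		c := b.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s=%s\n", k, v)
		}
		return nil
	})
}

func main() {
	db, err := bolt.Open("my.db", 0600, nil) // example path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := scanWidgets(db); err != nil {
		log.Fatal(err)
	}
}
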
+func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { + return tx.root.ForEach(func(k, v []byte) error { + return fn(k, tx.root.Bucket(k)) + }) +} + +// OnCommit adds a handler function to be executed after the transaction successfully commits. +func (tx *Tx) OnCommit(fn func()) { + tx.commitHandlers = append(tx.commitHandlers, fn) +} + +// Commit writes all changes to disk and updates the meta page. +// Returns an error if a disk write error occurs, or if Commit is +// called on a read-only transaction. +func (tx *Tx) Commit() error { + _assert(!tx.managed, "managed tx commit not allowed") + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + + // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. + + // Rebalance nodes which have had deletions. + var startTime = time.Now() + tx.root.rebalance() + if tx.stats.GetRebalance() > 0 { + tx.stats.IncRebalanceTime(time.Since(startTime)) + } + + opgid := tx.meta.pgid + + // spill data onto dirty pages. + startTime = time.Now() + if err := tx.root.spill(); err != nil { + tx.rollback() + return err + } + tx.stats.IncSpillTime(time.Since(startTime)) + + // Free the old root bucket. + tx.meta.root.root = tx.root.root + + // Free the old freelist because commit writes out a fresh freelist. + if tx.meta.freelist != pgidNoFreelist { + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + } + + if !tx.db.NoFreelistSync { + err := tx.commitFreelist() + if err != nil { + return err + } + } else { + tx.meta.freelist = pgidNoFreelist + } + + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + // Write dirty pages to disk. + startTime = time.Now() + if err := tx.write(); err != nil { + tx.rollback() + return err + } + + // If strict mode is enabled then perform a consistency check. + if tx.db.StrictMode { + ch := tx.Check() + var errs []string + for { + err, ok := <-ch + if !ok { + break + } + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + panic("check fail: " + strings.Join(errs, "\n")) + } + } + + // Write meta to disk. + if err := tx.writeMeta(); err != nil { + tx.rollback() + return err + } + tx.stats.IncWriteTime(time.Since(startTime)) + + // Finalize the transaction. + tx.close() + + // Execute commit handlers now that the locks have been removed. + for _, fn := range tx.commitHandlers { + fn() + } + + return nil +} + +func (tx *Tx) commitFreelist() error { + // Allocate new pages for the new free list. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + + return nil +} + +// Rollback closes the transaction and ignores all previous updates. Read-only +// transactions must be rolled back and not committed. +func (tx *Tx) Rollback() error { + _assert(!tx.managed, "managed tx rollback not allowed") + if tx.db == nil { + return ErrTxClosed + } + tx.nonPhysicalRollback() + return nil +} + +// nonPhysicalRollback is called when user calls Rollback directly, in this case we do not need to reload the free pages from disk. 
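
Commit() and Rollback() above are what the unmanaged transaction style calls explicitly; a sketch of that style, mirroring the pattern from the upstream documentation:

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

// createBucketManually is the unmanaged counterpart of db.Update: the caller
// owns the Tx returned by Begin and must Commit or Rollback it, as the
// comment on Tx above insists.
func createBucketManually(db *bolt.DB) error {
	tx, err := db.Begin(true) // writable transaction
	if err != nil {
		return err
	}
	// After a successful Commit this returns ErrTxClosed, which the defer
	// discards, so it is safe to leave in place unconditionally.
	defer tx.Rollback()

	if _, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil { // example bucket
		return err
	}
	return tx.Commit()
}

func main() {
	db, err := bolt.Open("my.db", 0600, nil) // example path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := createBucketManually(db); err != nil {
		log.Fatal(err)
	}
}
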
+func (tx *Tx) nonPhysicalRollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + } + tx.close() +} + +// rollback needs to reload the free pages from disk in case some system error happens like fsync error. +func (tx *Tx) rollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + // When mmap fails, the `data`, `dataref` and `datasz` may be reset to + // zero values, and there is no way to reload free page IDs in this case. + if tx.db.data != nil { + if !tx.db.hasSyncedFreelist() { + // Reconstruct free page list by scanning the DB to get the whole free page list. + // Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode. + tx.db.freelist.noSyncReload(tx.db.freepages()) + } else { + // Read free page list from freelist page. + tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } + } + } + tx.close() +} + +func (tx *Tx) close() { + if tx.db == nil { + return + } + if tx.writable { + // Grab freelist stats. + var freelistFreeN = tx.db.freelist.free_count() + var freelistPendingN = tx.db.freelist.pending_count() + var freelistAlloc = tx.db.freelist.size() + + // Remove transaction ref & writer lock. + tx.db.rwtx = nil + tx.db.rwlock.Unlock() + + // Merge statistics. + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } else { + tx.db.removeTx(tx) + } + + // Clear all references. + tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil +} + +// Copy writes the entire database to a writer. +// This function exists for backwards compatibility. +// +// Deprecated; Use WriteTo() instead. +func (tx *Tx) Copy(w io.Writer) error { + _, err := tx.WriteTo(w) + return err +} + +// WriteTo writes the entire database to a writer. +// If err == nil then exactly tx.Size() bytes will be written into the writer. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader with WriteFlag + f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err + } + defer func() { + if cerr := f.Close(); err == nil { + err = cerr + } + }() + + // Generate a meta page. We use the same page data for both meta pages. + buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 0 copy: %s", err) + } + + // Write meta 1 with a lower transaction id. + page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil { + return n, fmt.Errorf("seek: %s", err) + } + + // Copy data pages. + wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + return n, err + } + + return n, nil +} + +// CopyFile copies the entire database to file at the given path. 
+// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. +func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := tx.db.openFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + _, err = tx.WriteTo(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// allocate returns a contiguous block of memory starting at a given page. +func (tx *Tx) allocate(count int) (*page, error) { + p, err := tx.db.allocate(tx.meta.txid, count) + if err != nil { + return nil, err + } + + // Save to our page cache. + tx.pages[p.id] = p + + // Update statistics. + tx.stats.IncPageCount(int64(count)) + tx.stats.IncPageAlloc(int64(count * tx.db.pageSize)) + + return p, nil +} + +// write writes any dirty pages to disk. +func (tx *Tx) write() error { + // Sort pages by id. + pages := make(pages, 0, len(tx.pages)) + for _, p := range tx.pages { + pages = append(pages, p) + } + // Clear out page cache early. + tx.pages = make(map[pgid]*page) + sort.Sort(pages) + + // Write pages to disk in order. + for _, p := range pages { + rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize) + offset := int64(p.id) * int64(tx.db.pageSize) + var written uintptr + + // Write out page in "max allocation" sized chunks. + for { + sz := rem + if sz > maxAllocSize-1 { + sz = maxAllocSize - 1 + } + buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) + + if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + return err + } + + // Update statistics. + tx.stats.IncWrite(1) + + // Exit inner for loop if we've written all the chunks. + rem -= sz + if rem == 0 { + break + } + + // Otherwise move offset forward and move pointer to next chunk. + offset += int64(sz) + written += uintptr(sz) + } + } + + // Ignore file sync if flag is set on DB. + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Put small pages back to page pool. + for _, p := range pages { + // Ignore page sizes over 1 page. + // These are allocated using make() instead of the page pool. + if int(p.overflow) != 0 { + continue + } + + buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) + + // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 + for i := range buf { + buf[i] = 0 + } + tx.db.pagePool.Put(buf) //nolint:staticcheck + } + + return nil +} + +// writeMeta writes the meta to the disk. +func (tx *Tx) writeMeta() error { + // Create a temporary buffer for the meta page. + buf := make([]byte, tx.db.pageSize) + p := tx.db.pageInBuffer(buf, 0) + tx.meta.write(p) + + // Write the meta page to file. + if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + return err + } + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Update statistics. + tx.stats.IncWrite(1) + + return nil +} + +// page returns a reference to the page with a given id. +// If page has been written to then a temporary buffered page is returned. +func (tx *Tx) page(id pgid) *page { + // Check the dirty pages first. + if tx.pages != nil { + if p, ok := tx.pages[id]; ok { + p.fastCheck(id) + return p + } + } + + // Otherwise return directly from the mmap. + p := tx.db.page(id) + p.fastCheck(id) + return p +} + +// forEachPage iterates over every page within a given page and executes a function. 
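
WriteTo() and CopyFile() above make hot backups possible because a read transaction pins a consistent snapshot of the file; a small sketch (the destination path is an example, and WriteTo can be used the same way to stream the snapshot to any io.Writer):

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

// snapshot writes a consistent copy of the open database to path while other
// readers and writers keep working; the destination name is just an example.
func snapshot(db *bolt.DB, path string) error {
	return db.View(func(tx *bolt.Tx) error {
		return tx.CopyFile(path, 0600)
	})
}

func main() {
	db, err := bolt.Open("my.db", 0600, nil) // example path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := snapshot(db, "my.db.bak"); err != nil {
		log.Fatal(err)
	}
}
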
+func (tx *Tx) forEachPage(pgidnum pgid, fn func(*page, int, []pgid)) { + stack := make([]pgid, 10) + stack[0] = pgidnum + tx.forEachPageInternal(stack[:1], fn) +} + +func (tx *Tx) forEachPageInternal(pgidstack []pgid, fn func(*page, int, []pgid)) { + p := tx.page(pgidstack[len(pgidstack)-1]) + + // Execute function. + fn(p, len(pgidstack)-1, pgidstack) + + // Recursively loop over children. + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + tx.forEachPageInternal(append(pgidstack, elem.pgid), fn) + } + } +} + +// Page returns page information for a given page number. +// This is only safe for concurrent use when used by a writable transaction. +func (tx *Tx) Page(id int) (*PageInfo, error) { + if tx.db == nil { + return nil, ErrTxClosed + } else if pgid(id) >= tx.meta.pgid { + return nil, nil + } + + if tx.db.freelist == nil { + return nil, ErrFreePagesNotLoaded + } + + // Build the page info. + p := tx.db.page(pgid(id)) + info := &PageInfo{ + ID: id, + Count: int(p.count), + OverflowCount: int(p.overflow), + } + + // Determine the type (or if it's free). + if tx.db.freelist.freed(pgid(id)) { + info.Type = "free" + } else { + info.Type = p.typ() + } + + return info, nil +} + +// TxStats represents statistics about the actions performed by the transaction. +type TxStats struct { + // Page statistics. + // + // DEPRECATED: Use GetPageCount() or IncPageCount() + PageCount int64 // number of page allocations + // DEPRECATED: Use GetPageAlloc() or IncPageAlloc() + PageAlloc int64 // total bytes allocated + + // Cursor statistics. + // + // DEPRECATED: Use GetCursorCount() or IncCursorCount() + CursorCount int64 // number of cursors created + + // Node statistics + // + // DEPRECATED: Use GetNodeCount() or IncNodeCount() + NodeCount int64 // number of node allocations + // DEPRECATED: Use GetNodeDeref() or IncNodeDeref() + NodeDeref int64 // number of node dereferences + + // Rebalance statistics. + // + // DEPRECATED: Use GetRebalance() or IncRebalance() + Rebalance int64 // number of node rebalances + // DEPRECATED: Use GetRebalanceTime() or IncRebalanceTime() + RebalanceTime time.Duration // total time spent rebalancing + + // Split/Spill statistics. + // + // DEPRECATED: Use GetSplit() or IncSplit() + Split int64 // number of nodes split + // DEPRECATED: Use GetSpill() or IncSpill() + Spill int64 // number of nodes spilled + // DEPRECATED: Use GetSpillTime() or IncSpillTime() + SpillTime time.Duration // total time spent spilling + + // Write statistics. + // + // DEPRECATED: Use GetWrite() or IncWrite() + Write int64 // number of writes performed + // DEPRECATED: Use GetWriteTime() or IncWriteTime() + WriteTime time.Duration // total time spent writing to disk +} + +func (s *TxStats) add(other *TxStats) { + s.IncPageCount(other.GetPageCount()) + s.IncPageAlloc(other.GetPageAlloc()) + s.IncCursorCount(other.GetCursorCount()) + s.IncNodeCount(other.GetNodeCount()) + s.IncNodeDeref(other.GetNodeDeref()) + s.IncRebalance(other.GetRebalance()) + s.IncRebalanceTime(other.GetRebalanceTime()) + s.IncSplit(other.GetSplit()) + s.IncSpill(other.GetSpill()) + s.IncSpillTime(other.GetSpillTime()) + s.IncWrite(other.GetWrite()) + s.IncWriteTime(other.GetWriteTime()) +} + +// Sub calculates and returns the difference between two sets of transaction stats. +// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. 
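
The Sub helper documented just above is meant for exactly this kind of interval reporting; a sketch that snapshots the transaction stats twice and prints the difference, assuming the usual DB.Stats() accessor from the public API:

package main

import (
	"fmt"
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil) // example path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Snapshot the counters, let the workload run, then diff the snapshots.
	prev := db.Stats().TxStats
	time.Sleep(10 * time.Second) // stand-in for real work
	cur := db.Stats().TxStats
	diff := cur.Sub(&prev)
	fmt.Printf("writes=%d spillTime=%v rebalanceTime=%v\n",
		diff.GetWrite(), diff.GetSpillTime(), diff.GetRebalanceTime())
}
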
+func (s *TxStats) Sub(other *TxStats) TxStats { + var diff TxStats + diff.PageCount = s.GetPageCount() - other.GetPageCount() + diff.PageAlloc = s.GetPageAlloc() - other.GetPageAlloc() + diff.CursorCount = s.GetCursorCount() - other.GetCursorCount() + diff.NodeCount = s.GetNodeCount() - other.GetNodeCount() + diff.NodeDeref = s.GetNodeDeref() - other.GetNodeDeref() + diff.Rebalance = s.GetRebalance() - other.GetRebalance() + diff.RebalanceTime = s.GetRebalanceTime() - other.GetRebalanceTime() + diff.Split = s.GetSplit() - other.GetSplit() + diff.Spill = s.GetSpill() - other.GetSpill() + diff.SpillTime = s.GetSpillTime() - other.GetSpillTime() + diff.Write = s.GetWrite() - other.GetWrite() + diff.WriteTime = s.GetWriteTime() - other.GetWriteTime() + return diff +} + +// GetPageCount returns PageCount atomically. +func (s *TxStats) GetPageCount() int64 { + return atomic.LoadInt64(&s.PageCount) +} + +// IncPageCount increases PageCount atomically and returns the new value. +func (s *TxStats) IncPageCount(delta int64) int64 { + return atomic.AddInt64(&s.PageCount, delta) +} + +// GetPageAlloc returns PageAlloc atomically. +func (s *TxStats) GetPageAlloc() int64 { + return atomic.LoadInt64(&s.PageAlloc) +} + +// IncPageAlloc increases PageAlloc atomically and returns the new value. +func (s *TxStats) IncPageAlloc(delta int64) int64 { + return atomic.AddInt64(&s.PageAlloc, delta) +} + +// GetCursorCount returns CursorCount atomically. +func (s *TxStats) GetCursorCount() int64 { + return atomic.LoadInt64(&s.CursorCount) +} + +// IncCursorCount increases CursorCount atomically and return the new value. +func (s *TxStats) IncCursorCount(delta int64) int64 { + return atomic.AddInt64(&s.CursorCount, delta) +} + +// GetNodeCount returns NodeCount atomically. +func (s *TxStats) GetNodeCount() int64 { + return atomic.LoadInt64(&s.NodeCount) +} + +// IncNodeCount increases NodeCount atomically and returns the new value. +func (s *TxStats) IncNodeCount(delta int64) int64 { + return atomic.AddInt64(&s.NodeCount, delta) +} + +// GetNodeDeref returns NodeDeref atomically. +func (s *TxStats) GetNodeDeref() int64 { + return atomic.LoadInt64(&s.NodeDeref) +} + +// IncNodeDeref increases NodeDeref atomically and returns the new value. +func (s *TxStats) IncNodeDeref(delta int64) int64 { + return atomic.AddInt64(&s.NodeDeref, delta) +} + +// GetRebalance returns Rebalance atomically. +func (s *TxStats) GetRebalance() int64 { + return atomic.LoadInt64(&s.Rebalance) +} + +// IncRebalance increases Rebalance atomically and returns the new value. +func (s *TxStats) IncRebalance(delta int64) int64 { + return atomic.AddInt64(&s.Rebalance, delta) +} + +// GetRebalanceTime returns RebalanceTime atomically. +func (s *TxStats) GetRebalanceTime() time.Duration { + return atomicLoadDuration(&s.RebalanceTime) +} + +// IncRebalanceTime increases RebalanceTime atomically and returns the new value. +func (s *TxStats) IncRebalanceTime(delta time.Duration) time.Duration { + return atomicAddDuration(&s.RebalanceTime, delta) +} + +// GetSplit returns Split atomically. +func (s *TxStats) GetSplit() int64 { + return atomic.LoadInt64(&s.Split) +} + +// IncSplit increases Split atomically and returns the new value. +func (s *TxStats) IncSplit(delta int64) int64 { + return atomic.AddInt64(&s.Split, delta) +} + +// GetSpill returns Spill atomically. +func (s *TxStats) GetSpill() int64 { + return atomic.LoadInt64(&s.Spill) +} + +// IncSpill increases Spill atomically and returns the new value. 
+func (s *TxStats) IncSpill(delta int64) int64 { + return atomic.AddInt64(&s.Spill, delta) +} + +// GetSpillTime returns SpillTime atomically. +func (s *TxStats) GetSpillTime() time.Duration { + return atomicLoadDuration(&s.SpillTime) +} + +// IncSpillTime increases SpillTime atomically and returns the new value. +func (s *TxStats) IncSpillTime(delta time.Duration) time.Duration { + return atomicAddDuration(&s.SpillTime, delta) +} + +// GetWrite returns Write atomically. +func (s *TxStats) GetWrite() int64 { + return atomic.LoadInt64(&s.Write) +} + +// IncWrite increases Write atomically and returns the new value. +func (s *TxStats) IncWrite(delta int64) int64 { + return atomic.AddInt64(&s.Write, delta) +} + +// GetWriteTime returns WriteTime atomically. +func (s *TxStats) GetWriteTime() time.Duration { + return atomicLoadDuration(&s.WriteTime) +} + +// IncWriteTime increases WriteTime atomically and returns the new value. +func (s *TxStats) IncWriteTime(delta time.Duration) time.Duration { + return atomicAddDuration(&s.WriteTime, delta) +} + +func atomicAddDuration(ptr *time.Duration, du time.Duration) time.Duration { + return time.Duration(atomic.AddInt64((*int64)(unsafe.Pointer(ptr)), int64(du))) +} + +func atomicLoadDuration(ptr *time.Duration) time.Duration { + return time.Duration(atomic.LoadInt64((*int64)(unsafe.Pointer(ptr)))) +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/tx_check.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/tx_check.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/tx_check.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/tx_check.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,226 @@ +package bbolt + +import ( + "encoding/hex" + "fmt" +) + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + return tx.CheckWithOptions() +} + +// CheckWithOptions allows users to provide a customized `KVStringer` implementation, +// so that bolt can generate human-readable diagnostic messages. +func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error { + chkConfig := checkConfig{ + kvStringer: HexKVStringer(), + } + for _, op := range options { + op(&chkConfig) + } + + ch := make(chan error) + go tx.check(chkConfig.kvStringer, ch) + return ch +} + +func (tx *Tx) check(kvStringer KVStringer, ch chan error) { + // Force loading free list if opened in ReadOnly mode. + tx.db.loadFreelist() + + // Check if any pages are double freed. + freed := make(map[pgid]bool) + all := make([]pgid, tx.db.freelist.count()) + tx.db.freelist.copyall(all) + for _, id := range all { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. + reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + if tx.meta.freelist != pgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + } + + // Recursively check buckets. 
+ tx.checkBucket(&tx.root, reachable, freed, kvStringer, ch) + + // Ensure all pages below high water mark are either reachable or freed. + for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, + kvStringer KVStringer, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. + b.tx.forEachPage(b.root, func(p *page, _ int, stack []pgid) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.id), int(b.tx.meta.pgid), stack) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.id), p.typ(), stack) + } + }) + + tx.recursivelyCheckPages(b.root, kvStringer.KeyToString, ch) + + // Check each bucket within this bucket. + _ = b.ForEachBucket(func(k []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, kvStringer, ch) + } + return nil + }) +} + +// recursivelyCheckPages confirms database consistency with respect to b-tree +// key order constraints: +// - keys on pages must be sorted +// - keys on children pages are between 2 consecutive keys on the parent's branch page). +func (tx *Tx) recursivelyCheckPages(pgId pgid, keyToString func([]byte) string, ch chan error) { + tx.recursivelyCheckPagesInternal(pgId, nil, nil, nil, keyToString, ch) +} + +// recursivelyCheckPagesInternal verifies that all keys in the subtree rooted at `pgid` are: +// - >=`minKeyClosed` (can be nil) +// - <`maxKeyOpen` (can be nil) +// - Are in right ordering relationship to their parents. +// `pagesStack` is expected to contain IDs of pages from the tree root to `pgid` for the clean debugging message. +func (tx *Tx) recursivelyCheckPagesInternal( + pgId pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []pgid, + keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) { + + p := tx.page(pgId) + pagesStack = append(pagesStack, pgId) + switch { + case p.flags&branchPageFlag != 0: + // For branch page we navigate ranges of all subpages. 
+ runningMin := minKeyClosed + for i := range p.branchPageElements() { + elem := p.branchPageElement(uint16(i)) + verifyKeyOrder(elem.pgid, "branch", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) + + maxKey := maxKeyOpen + if i < len(p.branchPageElements())-1 { + maxKey = p.branchPageElement(uint16(i + 1)).key() + } + maxKeyInSubtree = tx.recursivelyCheckPagesInternal(elem.pgid, elem.key(), maxKey, pagesStack, keyToString, ch) + runningMin = maxKeyInSubtree + } + return maxKeyInSubtree + case p.flags&leafPageFlag != 0: + runningMin := minKeyClosed + for i := range p.leafPageElements() { + elem := p.leafPageElement(uint16(i)) + verifyKeyOrder(pgId, "leaf", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) + runningMin = elem.key() + } + if p.count > 0 { + return p.leafPageElement(p.count - 1).key() + } + default: + ch <- fmt.Errorf("unexpected page type for pgId:%d", pgId) + } + return maxKeyInSubtree +} + +/*** + * verifyKeyOrder checks whether an entry with given #index on pgId (pageType: "branch|leaf") that has given "key", + * is within range determined by (previousKey..maxKeyOpen) and reports found violations to the channel (ch). + */ +func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []pgid) { + if index == 0 && previousKey != nil && compareKeys(previousKey, key) > 0 { + ch <- fmt.Errorf("the first key[%d]=(hex)%s on %s page(%d) needs to be >= the key in the ancestor (%s). Stack: %v", + index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) + } + if index > 0 { + cmpRet := compareKeys(previousKey, key) + if cmpRet > 0 { + ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found <) than previous element (hex)%s. Stack: %v", + index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) + } + if cmpRet == 0 { + ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found =) than previous element (hex)%s. Stack: %v", + index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) + } + } + if maxKeyOpen != nil && compareKeys(key, maxKeyOpen) >= 0 { + ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be < than key of the next element in ancestor (hex)%s. Pages stack: %v", + index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) + } +} + +// =========================================================================================== + +type checkConfig struct { + kvStringer KVStringer +} + +type CheckOption func(options *checkConfig) + +func WithKVStringer(kvStringer KVStringer) CheckOption { + return func(c *checkConfig) { + c.kvStringer = kvStringer + } +} + +// KVStringer allows to prepare human-readable diagnostic messages. +type KVStringer interface { + KeyToString([]byte) string + ValueToString([]byte) string +} + +// HexKVStringer serializes both key & value to hex representation. 
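+// It is the default stringer applied by CheckWithOptions when no WithKVStringer
+// option is supplied, keeping diagnostic messages printable for arbitrary binary
+// keys and values. A read-only consistency check typically drains the returned
+// channel; for example, given an open transaction tx:
+//
+//	for err := range tx.CheckWithOptions(WithKVStringer(HexKVStringer())) {
+//		log.Println(err)
+//	}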
+func HexKVStringer() KVStringer { + return hexKvStringer{} +} + +type hexKvStringer struct{} + +func (_ hexKvStringer) KeyToString(key []byte) string { + return hex.EncodeToString(key) +} + +func (_ hexKvStringer) ValueToString(value []byte) string { + return hex.EncodeToString(value) +} diff -Nru snapd-2.62+23.10/vendor/go.etcd.io/bbolt/unsafe.go snapd-2.63+23.10/vendor/go.etcd.io/bbolt/unsafe.go --- snapd-2.62+23.10/vendor/go.etcd.io/bbolt/unsafe.go 1970-01-01 00:00:00.000000000 +0000 +++ snapd-2.63+23.10/vendor/go.etcd.io/bbolt/unsafe.go 2024-04-24 00:00:39.000000000 +0000 @@ -0,0 +1,39 @@ +package bbolt + +import ( + "reflect" + "unsafe" +) + +func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { + return unsafe.Pointer(uintptr(base) + offset) +} + +func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { + return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz) +} + +func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { + // See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices + // + // This memory is not allocated from C, but it is unmanaged by Go's + // garbage collector and should behave similarly, and the compiler + // should produce similar code. Note that this conversion allows a + // subslice to begin after the base address, with an optional offset, + // while the URL above does not cover this case and only slices from + // index 0. However, the wiki never says that the address must be to + // the beginning of a C allocation (or even that malloc was used at + // all), so this is believed to be correct. + return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j] +} + +// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by +// the slice parameter. This helper should be used over other direct +// manipulation of reflect.SliceHeader to prevent misuse, namely, converting +// from reflect.SliceHeader to a Go slice type. 
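+//
+// Because the header's Data field is set from a raw uintptr, the garbage
+// collector does not treat the resulting slice as a reference to the backing
+// memory; callers must keep that memory valid for as long as the slice is in
+// use.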
+func unsafeSlice(slice, data unsafe.Pointer, len int) { + s := (*reflect.SliceHeader)(slice) + s.Data = uintptr(data) + s.Cap = len + s.Len = len +} diff -Nru snapd-2.62+23.10/vendor/modules.txt snapd-2.63+23.10/vendor/modules.txt --- snapd-2.62+23.10/vendor/modules.txt 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/vendor/modules.txt 2024-04-24 00:00:39.000000000 +0000 @@ -58,14 +58,11 @@ # github.com/seccomp/libseccomp-golang v0.9.2-0.20220502024300-f57e1d55ea18 ## explicit; go 1.14 github.com/seccomp/libseccomp-golang -# github.com/snapcore/bolt v1.3.2-0.20210908134111-63c8bfcf7af8 -## explicit -github.com/snapcore/bolt # github.com/snapcore/go-gettext v0.0.0-20191107141714-82bbea49e785 ## explicit; go 1.13 github.com/snapcore/go-gettext github.com/snapcore/go-gettext/pluralforms -# github.com/snapcore/secboot v0.0.0-20230623151406-4d331d24f830 +# github.com/snapcore/secboot v0.0.0-20240411101434-f3ad7c92552a ## explicit github.com/snapcore/secboot github.com/snapcore/secboot/efi @@ -77,6 +74,9 @@ github.com/snapcore/secboot/internal/tcti github.com/snapcore/secboot/internal/truststore github.com/snapcore/secboot/tpm2 +# go.etcd.io/bbolt v1.3.9 +## explicit; go 1.17 +go.etcd.io/bbolt # go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 ## explicit; go 1.11 go.mozilla.org/pkcs7 diff -Nru snapd-2.62+23.10/wrappers/core18.go snapd-2.63+23.10/wrappers/core18.go --- snapd-2.62+23.10/wrappers/core18.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/wrappers/core18.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "bytes" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -49,7 +48,7 @@ // snapdUnitSkipStart returns true for units that should not be started // automatically func snapdUnitSkipStart(unitPath string) (skip bool, err error) { - content, err := ioutil.ReadFile(unitPath) + content, err := os.ReadFile(unitPath) if err != nil { if os.IsNotExist(err) { // no point in starting units that do not exist @@ -238,7 +237,7 @@ if err != nil { return nil, err } - content, err := ioutil.ReadFile(unit) + content, err := os.ReadFile(unit) if err != nil { return nil, err } @@ -483,7 +482,7 @@ if err != nil { return err } - content, err := ioutil.ReadFile(unit) + content, err := os.ReadFile(unit) if err != nil { return err } diff -Nru snapd-2.62+23.10/wrappers/dbus.go snapd-2.63+23.10/wrappers/dbus.go --- snapd-2.62+23.10/wrappers/dbus.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/wrappers/dbus.go 2024-04-24 00:00:39.000000000 +0000 @@ -21,7 +21,6 @@ import ( "bytes" - "io/ioutil" "os" "path/filepath" "regexp" @@ -68,7 +67,7 @@ // snapNameFromServiceFile returns the snap name for the D-Bus service activation file. 
func snapNameFromServiceFile(filename string) (owner string, err error) { - content, err := ioutil.ReadFile(filename) + content, err := os.ReadFile(filename) if err != nil { return "", err } diff -Nru snapd-2.62+23.10/wrappers/desktop.go snapd-2.63+23.10/wrappers/desktop.go --- snapd-2.62+23.10/wrappers/desktop.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/wrappers/desktop.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "bufio" "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -258,7 +257,7 @@ content := make(map[string]osutil.FileState) for _, df := range desktopFiles { base := filepath.Base(df) - fileContent, err := ioutil.ReadFile(df) + fileContent, err := os.ReadFile(df) if err != nil { return nil, err } diff -Nru snapd-2.62+23.10/wrappers/internal/service_status.go snapd-2.63+23.10/wrappers/internal/service_status.go --- snapd-2.62+23.10/wrappers/internal/service_status.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/wrappers/internal/service_status.go 2024-04-24 00:00:39.000000000 +0000 @@ -123,23 +123,10 @@ return cli.ServiceStatus(ctx, units) } -func clientUnitStatusToSystemdUnitStatus(unitStatus client.ServiceUnitStatus) *systemd.UnitStatus { - return &systemd.UnitStatus{ - Daemon: unitStatus.Daemon, - Id: unitStatus.Id, - Name: unitStatus.Name, - Names: unitStatus.Names, - Enabled: unitStatus.Enabled, - Active: unitStatus.Active, - Installed: unitStatus.Installed, - NeedDaemonReload: unitStatus.NeedDaemonReload, - } -} - func mapServiceStatusMany(stss []client.ServiceUnitStatus) map[string]*systemd.UnitStatus { stsMap := make(map[string]*systemd.UnitStatus, len(stss)) for _, sts := range stss { - stsMap[sts.Name] = clientUnitStatusToSystemdUnitStatus(sts) + stsMap[sts.Name] = sts.SystemdUnitStatus() } return stsMap } diff -Nru snapd-2.62+23.10/wrappers/services.go snapd-2.63+23.10/wrappers/services.go --- snapd-2.62+23.10/wrappers/services.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/wrappers/services.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "context" "fmt" - "io/ioutil" "os" "os/user" "path/filepath" @@ -132,7 +131,9 @@ func (c *userServiceClient) stopServices(services ...string) error { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout.DefaultTimeout)) defer cancel() - failures, err := c.cli.ServicesStop(ctx, services) + + const disable = false + failures, err := c.cli.ServicesStop(ctx, services, disable) for _, f := range failures { c.inter.Notify(fmt.Sprintf("Could not stop service %q for uid %d: %s", f.Service, f.Uid, f.Error)) } @@ -142,7 +143,8 @@ func (c *userServiceClient) startServices(services ...string) error { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout.DefaultTimeout)) defer cancel() - startFailures, stopFailures, err := c.cli.ServicesStart(ctx, services) + + startFailures, stopFailures, err := c.cli.ServicesStart(ctx, services, client.ClientServicesStartOptions{}) for _, f := range startFailures { c.inter.Notify(fmt.Sprintf("Could not start service %q for uid %d: %s", f.Service, f.Uid, f.Error)) } @@ -155,13 +157,7 @@ func (c *userServiceClient) restartServices(reload bool, services ...string) error { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout.DefaultTimeout)) defer cancel() - var failures []client.ServiceFailure - var err error - if reload { - failures, err = c.cli.ServicesReloadOrRestart(ctx, services) - } else { - failures, err = c.cli.ServicesRestart(ctx, services) - } + 
failures, err := c.cli.ServicesRestart(ctx, services, reload) for _, f := range failures { c.inter.Notify(fmt.Sprintf("Could not restart service %q for uid %d: %s", f.Service, f.Uid, f.Error)) } @@ -416,7 +412,7 @@ oldFileState := osutil.MemoryFileState{} if st, err := os.Stat(path); err == nil { - b, err := ioutil.ReadFile(path) + b, err := os.ReadFile(path) if err != nil { return nil, false, err } diff -Nru snapd-2.62+23.10/wrappers/services_test.go snapd-2.63+23.10/wrappers/services_test.go --- snapd-2.62+23.10/wrappers/services_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/wrappers/services_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "io/ioutil" "os" "os/user" "path/filepath" @@ -4100,7 +4099,7 @@ err := s.addSnapServices(info, false) c.Assert(err, IsNil) - content, err := ioutil.ReadFile(filepath.Join(dirs.GlobalRootDir, "/etc/systemd/system/snap.hello-snap.svc2.service")) + content, err := os.ReadFile(filepath.Join(dirs.GlobalRootDir, "/etc/systemd/system/snap.hello-snap.svc2.service")) c.Assert(err, IsNil) c.Check(strings.Contains(string(content), "\nWatchdogSec=12\n"), Equals, true) @@ -4109,7 +4108,7 @@ filepath.Join(dirs.GlobalRootDir, "/etc/systemd/system/snap.hello-snap.svc4.service"), } for _, svcPath := range noWatchdog { - content, err := ioutil.ReadFile(svcPath) + content, err := os.ReadFile(svcPath) c.Assert(err, IsNil) c.Check(strings.Contains(string(content), "WatchdogSec="), Equals, false) } @@ -4530,11 +4529,11 @@ err := s.addSnapServices(info, false) c.Assert(err, IsNil) - content, err := ioutil.ReadFile(filepath.Join(dirs.GlobalRootDir, "/etc/systemd/system/snap.hello-snap.svc2.service")) + content, err := os.ReadFile(filepath.Join(dirs.GlobalRootDir, "/etc/systemd/system/snap.hello-snap.svc2.service")) c.Assert(err, IsNil) c.Check(strings.Contains(string(content), "\nRestartSec=12\n"), Equals, true) - content, err = ioutil.ReadFile(filepath.Join(dirs.GlobalRootDir, "/etc/systemd/system/snap.hello-snap.svc3.service")) + content, err = os.ReadFile(filepath.Join(dirs.GlobalRootDir, "/etc/systemd/system/snap.hello-snap.svc3.service")) c.Assert(err, IsNil) c.Check(strings.Contains(string(content), "RestartSec="), Equals, false) } @@ -4765,7 +4764,7 @@ flags := wrappers.RestartServicesFlags{Reload: true, AlsoEnabledNonActive: true} err = wrappers.RestartServices(info.Services(), nil, &flags, progress.Null, s.perfTimings) - c.Assert(err, ErrorMatches, `some user services failed to restart or reload`) + c.Assert(err, ErrorMatches, `some user services failed to restart`) c.Check(s.sysdLog, DeepEquals, [][]string{ {"--user", "daemon-reload"}, {"--user", "show", "--property=Id,ActiveState,UnitFileState,Type,Names,NeedDaemonReload", srvFile}, diff -Nru snapd-2.62+23.10/x11/xauth.go snapd-2.63+23.10/x11/xauth.go --- snapd-2.62+23.10/x11/xauth.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/x11/xauth.go 2024-04-24 00:00:39.000000000 +0000 @@ -23,7 +23,6 @@ "encoding/binary" "fmt" "io" - "io/ioutil" "os" ) @@ -122,7 +121,7 @@ // MockXauthority will create a fake xauthority file and place it // on a temporary path which is returned as result. 
func MockXauthority(cookies int) (string, error) { - f, err := ioutil.TempFile("", "xauth") + f, err := os.CreateTemp("", "xauth") if err != nil { return "", err } diff -Nru snapd-2.62+23.10/x11/xauth_test.go snapd-2.63+23.10/x11/xauth_test.go --- snapd-2.62+23.10/x11/xauth_test.go 2024-03-21 20:06:09.000000000 +0000 +++ snapd-2.63+23.10/x11/xauth_test.go 2024-04-24 00:00:39.000000000 +0000 @@ -20,7 +20,6 @@ package x11_test import ( - "io/ioutil" "os" "testing" @@ -51,7 +50,7 @@ } func (s *xauthTestSuite) TestXauthFileExistsButHasInvalidContent(c *C) { - f, err := ioutil.TempFile("", "xauth") + f, err := os.CreateTemp("", "xauth") c.Assert(err, IsNil) defer os.Remove(f.Name())
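The wrappers, services, and x11 hunks above consistently replace the deprecated io/ioutil helpers with their Go 1.16 equivalents in the os package (ioutil.ReadFile -> os.ReadFile, ioutil.TempFile -> os.CreateTemp). A minimal, self-contained sketch of the replacement calls; the file name pattern and contents here are only illustrative and not taken from the patch:

package main

import (
	"fmt"
	"os"
)

func main() {
	// os.CreateTemp replaces ioutil.TempFile (moved into os in Go 1.16).
	f, err := os.CreateTemp("", "xauth")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	if _, err := f.WriteString("cookie"); err != nil {
		panic(err)
	}
	f.Close()

	// os.ReadFile replaces ioutil.ReadFile (moved into os in Go 1.16).
	content, err := os.ReadFile(f.Name())
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes from %s\n", len(content), f.Name())
}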