diff -Nru ceph-16.2.0/ceph.spec ceph-16.2.1/ceph.spec --- ceph-16.2.0/ceph.spec 2021-03-30 21:16:27.000000000 +0000 +++ ceph-16.2.1/ceph.spec 2021-04-19 13:52:50.000000000 +0000 @@ -122,7 +122,7 @@ # main package definition ################################################################################# Name: ceph -Version: 16.2.0 +Version: 16.2.1 Release: 0%{?dist} %if 0%{?fedora} || 0%{?rhel} Epoch: 2 @@ -138,7 +138,7 @@ Group: System/Filesystems %endif URL: http://ceph.com/ -Source0: %{?_remote_tarball_prefix}ceph-16.2.0.tar.bz2 +Source0: %{?_remote_tarball_prefix}ceph-16.2.1.tar.bz2 %if 0%{?suse_version} # _insert_obs_source_lines_here ExclusiveArch: x86_64 aarch64 ppc64le s390x @@ -1198,7 +1198,7 @@ # common ################################################################################# %prep -%autosetup -p1 -n ceph-16.2.0 +%autosetup -p1 -n ceph-16.2.1 %build # LTO can be enabled as soon as the following GCC bug is fixed: diff -Nru ceph-16.2.0/CMakeLists.txt ceph-16.2.1/CMakeLists.txt --- ceph-16.2.0/CMakeLists.txt 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/CMakeLists.txt 2021-04-19 13:50:07.000000000 +0000 @@ -716,4 +716,4 @@ EXCLUDES "*.js" "*.css" ".tox" "python-common/build") add_custom_target(tags DEPENDS ctags) -set(VERSION 16.2.0) +set(VERSION 16.2.1) diff -Nru ceph-16.2.0/debian/ceph-base.install ceph-16.2.1/debian/ceph-base.install --- ceph-16.2.0/debian/ceph-base.install 2021-03-26 09:38:35.000000000 +0000 +++ ceph-16.2.1/debian/ceph-base.install 2021-05-04 18:20:11.000000000 +0000 @@ -12,7 +12,6 @@ usr/sbin/ceph-create-keys usr/share/doc/ceph/sample.ceph.conf usr/share/man/man8/ceph-create-keys.8 -usr/share/man/man8/ceph-deploy.8 usr/share/man/man8/ceph-kvstore-tool.8 usr/share/man/man8/ceph-run.8 usr/share/man/man8/crushtool.8 diff -Nru ceph-16.2.0/debian/changelog ceph-16.2.1/debian/changelog --- ceph-16.2.0/debian/changelog 2021-04-10 07:14:20.000000000 +0000 +++ ceph-16.2.1/debian/changelog 2021-05-04 18:21:24.000000000 +0000 @@ 
-1,3 +1,20 @@ +ceph (16.2.1-0ubuntu1) impish; urgency=medium + + [ Chris MacNaughton ] + * d/ceph-base.install: Remove ceph-deploy man page installation + (LP: #1892448). + + [ James Page ] + * SECURITY UPDATE: New upstream point release (LP: #1925322): + - CVE-2021-20288 + * d/rules: remove temporary build objects after install to avoid + running out of disk space during package builds. + * d/p/bug1925347.patch: Cherry pick fix to revert ProtectClock + permissions change in systemd configurations which prevents the + ceph-osd process from starting (LP: #1925347). + + -- James Page Tue, 04 May 2021 19:21:24 +0100 + ceph (16.2.0-0ubuntu1) hirsute; urgency=medium [ Chris MacNaughton ] diff -Nru ceph-16.2.0/debian/gbp.conf ceph-16.2.1/debian/gbp.conf --- ceph-16.2.0/debian/gbp.conf 2021-04-10 07:12:54.000000000 +0000 +++ ceph-16.2.1/debian/gbp.conf 2021-05-04 18:21:08.000000000 +0000 @@ -1,3 +1,3 @@ [DEFAULT] -debian-branch = ubuntu/hirsute +debian-branch = ubuntu/impish pristine-tar = True diff -Nru ceph-16.2.0/debian/patches/bug1925347.patch ceph-16.2.1/debian/patches/bug1925347.patch --- ceph-16.2.0/debian/patches/bug1925347.patch 1970-01-01 00:00:00.000000000 +0000 +++ ceph-16.2.1/debian/patches/bug1925347.patch 2021-05-04 18:20:11.000000000 +0000 @@ -0,0 +1,154 @@ +From 833a53cfdfa30559d7608e03d6da432260e286a8 Mon Sep 17 00:00:00 2001 +From: Wong Hoi Sing Edison +Date: Wed, 14 Apr 2021 15:36:17 +0800 +Subject: [PATCH] systemd: remove `ProtectClock=true` for `ceph-osd@.service` + +Ceph 16.2.0 Pacific by https://github.com/ceph/ceph/commit/9a84d5a introduce following new systemd restriction: + + ProtectClock=true + ProtectHostname=true + ProtectKernelLogs=true + RestrictSUIDSGID=true + +BTW, `ceph-osd@.service` failed with `ProtectClock=true` unexpectly, also see: + + - + - + +This PR intruduce: + + - Remove `ProtectClock=true` for our systemd service templates + +Fixes: https://tracker.ceph.com/issues/50347 +Signed-off-by: Wong Hoi Sing Edison +(cherry picked from 
commit 85bc551b179d940a50cbdfd0c20848e3187c70a6) +--- + systemd/ceph-fuse@.service.in | 1 - + systemd/ceph-immutable-object-cache@.service.in | 1 - + systemd/ceph-mds@.service.in | 1 - + systemd/ceph-mgr@.service.in | 1 - + systemd/ceph-mon@.service.in | 1 - + systemd/ceph-osd@.service.in | 1 - + systemd/ceph-radosgw@.service.in | 1 - + systemd/ceph-rbd-mirror@.service.in | 1 - + systemd/cephfs-mirror@.service.in | 3 +-- + 9 files changed, 1 insertion(+), 10 deletions(-) + +diff --git a/systemd/ceph-fuse@.service.in b/systemd/ceph-fuse@.service.in +index 1ea4b17675a..9c12c9ba444 100644 +--- a/systemd/ceph-fuse@.service.in ++++ b/systemd/ceph-fuse@.service.in +@@ -14,7 +14,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + # ceph-fuse requires access to /dev fuse device + PrivateDevices=no +-ProtectClock=true + ProtectControlGroups=true + ProtectHostname=true + ProtectKernelLogs=true +diff --git a/systemd/ceph-immutable-object-cache@.service.in b/systemd/ceph-immutable-object-cache@.service.in +index f5782487f9e..62ff8dbd272 100644 +--- a/systemd/ceph-immutable-object-cache@.service.in ++++ b/systemd/ceph-immutable-object-cache@.service.in +@@ -14,7 +14,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-mds@.service.in b/systemd/ceph-mds@.service.in +index 2884f587f97..afa36702f9c 100644 +--- a/systemd/ceph-mds@.service.in ++++ b/systemd/ceph-mds@.service.in +@@ -17,7 +17,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-mgr@.service.in b/systemd/ceph-mgr@.service.in +index 1ee28285209..8fadc4746b3 100644 +--- a/systemd/ceph-mgr@.service.in ++++ b/systemd/ceph-mgr@.service.in +@@ -16,7 +16,6 @@ LockPersonality=true + NoNewPrivileges=true + 
PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-mon@.service.in b/systemd/ceph-mon@.service.in +index 994cdfd2869..b7c92f278e3 100644 +--- a/systemd/ceph-mon@.service.in ++++ b/systemd/ceph-mon@.service.in +@@ -22,7 +22,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=false + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-osd@.service.in b/systemd/ceph-osd@.service.in +index 4981417d620..046500efb66 100644 +--- a/systemd/ceph-osd@.service.in ++++ b/systemd/ceph-osd@.service.in +@@ -18,7 +18,6 @@ MemoryDenyWriteExecute=true + # Need NewPrivileges via `sudo smartctl` + NoNewPrivileges=false + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-radosgw@.service.in b/systemd/ceph-radosgw@.service.in +index cfff60c18b8..b7474705506 100644 +--- a/systemd/ceph-radosgw@.service.in ++++ b/systemd/ceph-radosgw@.service.in +@@ -16,7 +16,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-rbd-mirror@.service.in b/systemd/ceph-rbd-mirror@.service.in +index fe49f11116e..1057892dc99 100644 +--- a/systemd/ceph-rbd-mirror@.service.in ++++ b/systemd/ceph-rbd-mirror@.service.in +@@ -16,7 +16,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/cephfs-mirror@.service.in b/systemd/cephfs-mirror@.service.in +index a97d6ad8b57..bed9d195302 100644 +--- a/systemd/cephfs-mirror@.service.in ++++ b/systemd/cephfs-mirror@.service.in +@@ -15,7 +15,6 @@ MemoryDenyWriteExecute=true + 
NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +@@ -30,4 +29,4 @@ StartLimitInterval=30min + TasksMax=infinity + + [Install] +-WantedBy=cephfs-mirror.target +\ No newline at end of file ++WantedBy=cephfs-mirror.target +-- +2.30.2 + diff -Nru ceph-16.2.0/debian/patches/series ceph-16.2.1/debian/patches/series --- ceph-16.2.0/debian/patches/series 2021-04-10 07:12:54.000000000 +0000 +++ ceph-16.2.1/debian/patches/series 2021-05-04 18:20:11.000000000 +0000 @@ -14,3 +14,5 @@ # text relocation fix AARCH64 bug1917414.patch bug1914584.patch +# systemd permissions +bug1925347.patch diff -Nru ceph-16.2.0/debian/rules ceph-16.2.1/debian/rules --- ceph-16.2.0/debian/rules 2021-04-10 07:12:54.000000000 +0000 +++ ceph-16.2.1/debian/rules 2021-05-04 18:20:11.000000000 +0000 @@ -125,8 +125,8 @@ # from the package install - package ships unversioned modules. rm -f $(CURDIR)/debian/tmp/usr/lib/*/ceph/erasure-code/libec_*.so.* find $(CURDIR)/debian/tmp/usr/lib/*/ceph/erasure-code -type l -delete || : - # running out of disk space on riscv64 - rm -rf $(CURDIR)/obj-riscv64-linux-gnu + # avoid running out of disk space + rm -rf $(CURDIR)/obj-*-linux-gnu # doc/changelog is a directory, which confuses dh_installchangelogs diff -Nru ceph-16.2.0/doc/rados/operations/health-checks.rst ceph-16.2.1/doc/rados/operations/health-checks.rst --- ceph-16.2.0/doc/rados/operations/health-checks.rst 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/doc/rados/operations/health-checks.rst 2021-04-19 13:50:07.000000000 +0000 @@ -134,6 +134,73 @@ ceph config set global mon_data_size_warn +AUTH_INSECURE_GLOBAL_ID_RECLAIM +_______________________________ + +One or more clients or daemons are connected to the cluster that are +not securely reclaiming their global_id (a unique number identifying +each entity in the cluster) when reconnecting to a monitor. 
The +client is being permitted to connect anyway because the +``auth_allow_insecure_global_id_reclaim`` option is set to true (which may +be necessary until all ceph clients have been upgraded), and the +``auth_expose_insecure_global_id_reclaim`` option set to ``true`` (which +allows monitors to detect clients with insecure reclaim early by forcing them to +reconnect right after they first authenticate). + +You can identify which client(s) are using unpatched ceph client code with:: + + ceph health detail + +Clients global_id reclaim behavior can also be seen in the +``global_id_status`` field in the dump of clients connected to an +individual monitor (``reclaim_insecure`` means the client is +unpatched and is contributing to this health alert):: + + ceph tell mon.\* sessions + +We strongly recommend that all clients in the system are upgraded to a +newer version of Ceph that correctly reclaims global_id values. Once +all clients have been updated, you can stop allowing insecure reconnections +with:: + + ceph config set mon auth_allow_insecure_global_id_reclaim false + +If it is impractical to upgrade all clients immediately, you can silence +this warning temporarily with:: + + ceph health mute AUTH_INSECURE_GLOBAL_ID_RECLAIM 1w # 1 week + +Although we do NOT recommend doing so, you can also disable this warning indefinitely +with:: + + ceph config set mon mon_warn_on_insecure_global_id_reclaim false + +AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED +_______________________________________ + +Ceph is currently configured to allow clients to reconnect to monitors using +an insecure process to reclaim their previous global_id because the setting +``auth_allow_insecure_global_id_reclaim`` is set to ``true``. It may be necessary to +leave this setting enabled while existing Ceph clients are upgraded to newer +versions of Ceph that correctly and securely reclaim their global_id. 
+ +If the ``AUTH_INSECURE_GLOBAL_ID_RECLAIM`` health alert has not also been raised and +the ``auth_expose_insecure_global_id_reclaim`` setting has not been disabled (it is +on by default), then there are currently no clients connected that need to be +upgraded, and it is safe to disallow insecure global_id reclaim with:: + + ceph config set mon auth_allow_insecure_global_id_reclaim false + +If there are still clients that need to be upgraded, then this alert can be +silenced temporarily with:: + + ceph health mute AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED 1w # 1 week + +Although we do NOT recommend doing so, you can also disable this warning indefinitely +with:: + + ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false + Manager ------- diff -Nru ceph-16.2.0/qa/standalone/ceph-helpers.sh ceph-16.2.1/qa/standalone/ceph-helpers.sh --- ceph-16.2.0/qa/standalone/ceph-helpers.sh 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/qa/standalone/ceph-helpers.sh 2021-04-19 13:50:07.000000000 +0000 @@ -481,6 +481,7 @@ --mon-allow-pool-size-one \ --osd-pool-default-pg-autoscale-mode off \ --mon-osd-backfillfull-ratio .99 \ + --mon-warn-on-insecure-global-id-reclaim-allowed=false \ "$@" || return 1 cat > $dir/ceph.conf <entity_name.get_type() && !this->global_id && + global_id_status == global_id_status_t::NONE); + + ldout(cct, 10) << __func__ << " entity_name=" << entity_name + << " global_id=" << global_id << " is_new_global_id=" + << is_new_global_id << dendl; + this->entity_name = entity_name; + this->global_id = global_id; + + return do_start_session(is_new_global_id, result, caps); +} + AuthServiceHandler *get_auth_service_handler(int type, CephContext *cct, KeyServer *ks) { switch (type) { diff -Nru ceph-16.2.0/src/auth/AuthServiceHandler.h ceph-16.2.1/src/auth/AuthServiceHandler.h --- ceph-16.2.0/src/auth/AuthServiceHandler.h 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/auth/AuthServiceHandler.h 2021-04-19 13:50:07.000000000 +0000 @@ 
-25,32 +25,57 @@ class CryptoKey; struct AuthCapsInfo; +enum class global_id_status_t { + NONE, + // fresh client (global_id == 0); waiting for CephXAuthenticate + NEW_PENDING, + // connected client; new enough to correctly reclaim global_id + NEW_OK, + // connected client; unknown whether it can reclaim global_id correctly + NEW_NOT_EXPOSED, + // reconnecting client (global_id != 0); waiting for CephXAuthenticate + RECLAIM_PENDING, + // reconnected client; correctly reclaimed global_id + RECLAIM_OK, + // reconnected client; did not properly prove prior global_id ownership + RECLAIM_INSECURE +}; + +std::ostream& operator<<(std::ostream& os, + global_id_status_t global_id_status); + struct AuthServiceHandler { protected: CephContext *cct; -public: EntityName entity_name; - uint64_t global_id; + uint64_t global_id = 0; + global_id_status_t global_id_status = global_id_status_t::NONE; - explicit AuthServiceHandler(CephContext *cct_) : cct(cct_), global_id(0) {} +public: + explicit AuthServiceHandler(CephContext *cct_) : cct(cct_) {} virtual ~AuthServiceHandler() { } - virtual int start_session(const EntityName& name, - size_t connection_secret_required_length, - ceph::buffer::list *result, - AuthCapsInfo *caps, - CryptoKey *session_key, - std::string *connection_secret) = 0; + int start_session(const EntityName& entity_name, + uint64_t global_id, + bool is_new_global_id, + ceph::buffer::list *result, + AuthCapsInfo *caps); virtual int handle_request(ceph::buffer::list::const_iterator& indata, size_t connection_secret_required_length, ceph::buffer::list *result, - uint64_t *global_id, AuthCapsInfo *caps, CryptoKey *session_key, std::string *connection_secret) = 0; - EntityName& get_entity_name() { return entity_name; } + const EntityName& get_entity_name() { return entity_name; } + uint64_t get_global_id() { return global_id; } + global_id_status_t get_global_id_status() { return global_id_status; } + +private: + virtual int do_start_session(bool is_new_global_id, + 
ceph::buffer::list *result, + AuthCapsInfo *caps) = 0; }; extern AuthServiceHandler *get_auth_service_handler(int type, CephContext *cct, KeyServer *ks); diff -Nru ceph-16.2.0/src/auth/cephx/CephxClientHandler.h ceph-16.2.1/src/auth/cephx/CephxClientHandler.h --- ceph-16.2.0/src/auth/cephx/CephxClientHandler.h 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/auth/cephx/CephxClientHandler.h 2021-04-19 13:50:07.000000000 +0000 @@ -48,6 +48,10 @@ reset(); } + CephxClientHandler* clone() const override { + return new CephxClientHandler(*this); + } + void reset() override; void prepare_build_request() override; int build_request(ceph::buffer::list& bl) const override; diff -Nru ceph-16.2.0/src/auth/cephx/CephxKeyServer.cc ceph-16.2.1/src/auth/cephx/CephxKeyServer.cc --- ceph-16.2.0/src/auth/cephx/CephxKeyServer.cc 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/auth/cephx/CephxKeyServer.cc 2021-04-19 13:50:07.000000000 +0000 @@ -30,7 +30,8 @@ using ceph::Formatter; bool KeyServerData::get_service_secret(CephContext *cct, uint32_t service_id, - ExpiringCryptoKey& secret, uint64_t& secret_id) const + CryptoKey& secret, uint64_t& secret_id, + double& ttl) const { auto iter = rotating_secrets.find(service_id); if (iter == rotating_secrets.end()) { @@ -45,25 +46,25 @@ if (secrets.secrets.size() > 1) ++riter; - if (riter->second.expiration < ceph_clock_now()) + utime_t now = ceph_clock_now(); + if (riter->second.expiration < now) ++riter; // "current" key has expired, use "next" key instead secret_id = riter->first; - secret = riter->second; - ldout(cct, 30) << "get_service_secret service " << ceph_entity_type_name(service_id) - << " id " << secret_id << " " << secret << dendl; - return true; -} - -bool KeyServerData::get_service_secret(CephContext *cct, uint32_t service_id, - CryptoKey& secret, uint64_t& secret_id) const -{ - ExpiringCryptoKey e; - - if (!get_service_secret(cct, service_id, e, secret_id)) - return false; + secret = riter->second.key; - secret 
= e.key; + // ttl may have just been increased by the user + // cap it by expiration of "next" key to prevent handing out a ticket + // with a bogus, possibly way into the future, validity + ttl = service_id == CEPH_ENTITY_TYPE_AUTH ? + cct->_conf->auth_mon_ticket_ttl : cct->_conf->auth_service_ticket_ttl; + ttl = min(ttl, static_cast( + secrets.secrets.rbegin()->second.expiration - now)); + + ldout(cct, 30) << __func__ << " service " + << ceph_entity_type_name(service_id) << " secret_id " + << secret_id << " " << riter->second << " ttl " << ttl + << dendl; return true; } @@ -236,12 +237,12 @@ return data.get_caps(cct, name, type, caps_info); } -bool KeyServer::get_service_secret(uint32_t service_id, - CryptoKey& secret, uint64_t& secret_id) const +bool KeyServer::get_service_secret(uint32_t service_id, CryptoKey& secret, + uint64_t& secret_id, double& ttl) const { std::scoped_lock l{lock}; - return data.get_service_secret(cct, service_id, secret, secret_id); + return data.get_service_secret(cct, service_id, secret, secret_id, ttl); } bool KeyServer::get_service_secret(uint32_t service_id, @@ -413,12 +414,13 @@ int KeyServer::_build_session_auth_info(uint32_t service_id, const AuthTicket& parent_ticket, - CephXSessionAuthInfo& info) + CephXSessionAuthInfo& info, + double ttl) { info.service_id = service_id; info.ticket = parent_ticket; - info.ticket.init_timestamps(ceph_clock_now(), - cct->_conf->auth_service_ticket_ttl); + info.ticket.init_timestamps(ceph_clock_now(), ttl); + info.validity.set_from_double(ttl); generate_secret(info.session_key); @@ -436,25 +438,27 @@ const AuthTicket& parent_ticket, CephXSessionAuthInfo& info) { - if (!get_service_secret(service_id, info.service_secret, info.secret_id)) { + double ttl; + if (!get_service_secret(service_id, info.service_secret, info.secret_id, + ttl)) { return -EACCES; } std::scoped_lock l{lock}; - - return _build_session_auth_info(service_id, parent_ticket, info); + return _build_session_auth_info(service_id, 
parent_ticket, info, ttl); } int KeyServer::build_session_auth_info(uint32_t service_id, const AuthTicket& parent_ticket, - CephXSessionAuthInfo& info, - CryptoKey& service_secret, - uint64_t secret_id) + const CryptoKey& service_secret, + uint64_t secret_id, + CephXSessionAuthInfo& info) { info.service_secret = service_secret; info.secret_id = secret_id; std::scoped_lock l{lock}; - return _build_session_auth_info(service_id, parent_ticket, info); + return _build_session_auth_info(service_id, parent_ticket, info, + cct->_conf->auth_service_ticket_ttl); } diff -Nru ceph-16.2.0/src/auth/cephx/CephxKeyServer.h ceph-16.2.1/src/auth/cephx/CephxKeyServer.h --- ceph-16.2.0/src/auth/cephx/CephxKeyServer.h 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/auth/cephx/CephxKeyServer.h 2021-04-19 13:50:07.000000000 +0000 @@ -94,9 +94,8 @@ } bool get_service_secret(CephContext *cct, uint32_t service_id, - ExpiringCryptoKey& secret, uint64_t& secret_id) const; - bool get_service_secret(CephContext *cct, uint32_t service_id, - CryptoKey& secret, uint64_t& secret_id) const; + CryptoKey& secret, uint64_t& secret_id, + double& ttl) const; bool get_service_secret(CephContext *cct, uint32_t service_id, uint64_t secret_id, CryptoKey& secret) const; bool get_auth(const EntityName& name, EntityAuth& auth) const; @@ -201,7 +200,8 @@ void _dump_rotating_secrets(); int _build_session_auth_info(uint32_t service_id, const AuthTicket& parent_ticket, - CephXSessionAuthInfo& info); + CephXSessionAuthInfo& info, + double ttl); bool _get_service_caps(const EntityName& name, uint32_t service_id, AuthCapsInfo& caps) const; public: @@ -220,13 +220,13 @@ CephXSessionAuthInfo& info); int build_session_auth_info(uint32_t service_id, const AuthTicket& parent_ticket, - CephXSessionAuthInfo& info, - CryptoKey& service_secret, - uint64_t secret_id); + const CryptoKey& service_secret, + uint64_t secret_id, + CephXSessionAuthInfo& info); /* get current secret for specific service type */ - bool 
get_service_secret(uint32_t service_id, CryptoKey& service_key, - uint64_t& secret_id) const; + bool get_service_secret(uint32_t service_id, CryptoKey& secret, + uint64_t& secret_id, double& ttl) const; bool get_service_secret(uint32_t service_id, uint64_t secret_id, CryptoKey& secret) const override; diff -Nru ceph-16.2.0/src/auth/cephx/CephxProtocol.cc ceph-16.2.1/src/auth/cephx/CephxProtocol.cc --- ceph-16.2.0/src/auth/cephx/CephxProtocol.cc 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/auth/cephx/CephxProtocol.cc 2021-04-19 13:50:07.000000000 +0000 @@ -377,8 +377,10 @@ << " need " << need << dendl; } -bool cephx_decode_ticket(CephContext *cct, KeyStore *keys, uint32_t service_id, - CephXTicketBlob& ticket_blob, CephXServiceTicketInfo& ticket_info) +bool cephx_decode_ticket(CephContext *cct, KeyStore *keys, + uint32_t service_id, + const CephXTicketBlob& ticket_blob, + CephXServiceTicketInfo& ticket_info) { uint64_t secret_id = ticket_blob.secret_id; CryptoKey service_secret; diff -Nru ceph-16.2.0/src/auth/cephx/CephxProtocol.h ceph-16.2.1/src/auth/cephx/CephxProtocol.h --- ceph-16.2.0/src/auth/cephx/CephxProtocol.h 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/auth/cephx/CephxProtocol.h 2021-04-19 13:50:07.000000000 +0000 @@ -123,9 +123,11 @@ CephXTicketBlob old_ticket; uint32_t other_keys = 0; // replaces CephXServiceTicketRequest + bool old_ticket_may_be_omitted; + void encode(ceph::buffer::list& bl) const { using ceph::encode; - __u8 struct_v = 2; + __u8 struct_v = 3; encode(struct_v, bl); encode(client_challenge, bl); encode(key, bl); @@ -142,6 +144,13 @@ if (struct_v >= 2) { decode(other_keys, bl); } + + // v2 and v3 encodings are the same, but: + // - some clients that send v1 or v2 don't populate old_ticket + // on reconnects (but do on renewals) + // - any client that sends v3 or later is expected to populate + // old_ticket both on reconnects and renewals + old_ticket_may_be_omitted = struct_v < 3; } }; 
WRITE_CLASS_ENCODER(CephXAuthenticate) @@ -415,7 +424,8 @@ * Decode an extract ticket */ bool cephx_decode_ticket(CephContext *cct, KeyStore *keys, - uint32_t service_id, CephXTicketBlob& ticket_blob, + uint32_t service_id, + const CephXTicketBlob& ticket_blob, CephXServiceTicketInfo& ticket_info); /* diff -Nru ceph-16.2.0/src/auth/cephx/CephxServiceHandler.cc ceph-16.2.1/src/auth/cephx/CephxServiceHandler.cc --- ceph-16.2.0/src/auth/cephx/CephxServiceHandler.cc 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/auth/cephx/CephxServiceHandler.cc 2021-04-19 13:50:07.000000000 +0000 @@ -35,15 +35,13 @@ using ceph::decode; using ceph::encode; -int CephxServiceHandler::start_session( - const EntityName& name, - size_t connection_secret_required_length, +int CephxServiceHandler::do_start_session( + bool is_new_global_id, bufferlist *result_bl, - AuthCapsInfo *caps, - CryptoKey *session_key, - std::string *connection_secret) + AuthCapsInfo *caps) { - entity_name = name; + global_id_status = is_new_global_id ? 
global_id_status_t::NEW_PENDING : + global_id_status_t::RECLAIM_PENDING; uint64_t min = 1; // always non-zero uint64_t max = std::numeric_limits::max(); @@ -57,11 +55,90 @@ return 0; } +int CephxServiceHandler::verify_old_ticket( + const CephXAuthenticate& req, + CephXServiceTicketInfo& old_ticket_info, + bool& should_enc_ticket) +{ + ldout(cct, 20) << " checking old_ticket: secret_id=" + << req.old_ticket.secret_id + << " len=" << req.old_ticket.blob.length() + << ", old_ticket_may_be_omitted=" + << req.old_ticket_may_be_omitted << dendl; + ceph_assert(global_id_status != global_id_status_t::NONE); + if (global_id_status == global_id_status_t::NEW_PENDING) { + // old ticket is not needed + if (req.old_ticket.blob.length()) { + ldout(cct, 0) << " superfluous ticket presented" << dendl; + return -EINVAL; + } + if (req.old_ticket_may_be_omitted) { + ldout(cct, 10) << " new global_id " << global_id + << " (unexposed legacy client)" << dendl; + global_id_status = global_id_status_t::NEW_NOT_EXPOSED; + } else { + ldout(cct, 10) << " new global_id " << global_id << dendl; + global_id_status = global_id_status_t::NEW_OK; + } + return 0; + } + + if (!req.old_ticket.blob.length()) { + // old ticket is needed but not presented + if (cct->_conf->auth_allow_insecure_global_id_reclaim && + req.old_ticket_may_be_omitted) { + ldout(cct, 10) << " allowing reclaim of global_id " << global_id + << " with no ticket presented (legacy client, auth_allow_insecure_global_id_reclaim=true)" + << dendl; + global_id_status = global_id_status_t::RECLAIM_INSECURE; + return 0; + } + ldout(cct, 0) << " attempt to reclaim global_id " << global_id + << " without presenting ticket" << dendl; + return -EACCES; + } + + if (!cephx_decode_ticket(cct, key_server, CEPH_ENTITY_TYPE_AUTH, + req.old_ticket, old_ticket_info)) { + if (cct->_conf->auth_allow_insecure_global_id_reclaim && + req.old_ticket_may_be_omitted) { + ldout(cct, 10) << " allowing reclaim of global_id " << global_id + << " using bad 
ticket (legacy client, auth_allow_insecure_global_id_reclaim=true)" + << dendl; + global_id_status = global_id_status_t::RECLAIM_INSECURE; + return 0; + } + ldout(cct, 0) << " attempt to reclaim global_id " << global_id + << " using bad ticket" << dendl; + return -EACCES; + } + ldout(cct, 20) << " decoded old_ticket: global_id=" + << old_ticket_info.ticket.global_id << dendl; + if (global_id != old_ticket_info.ticket.global_id) { + if (cct->_conf->auth_allow_insecure_global_id_reclaim && + req.old_ticket_may_be_omitted) { + ldout(cct, 10) << " allowing reclaim of global_id " << global_id + << " using mismatching ticket (legacy client, auth_allow_insecure_global_id_reclaim=true)" + << dendl; + global_id_status = global_id_status_t::RECLAIM_INSECURE; + return 0; + } + ldout(cct, 0) << " attempt to reclaim global_id " << global_id + << " using mismatching ticket" << dendl; + return -EACCES; + } + ldout(cct, 10) << " allowing reclaim of global_id " << global_id + << " (valid ticket presented, will encrypt new ticket)" + << dendl; + global_id_status = global_id_status_t::RECLAIM_OK; + should_enc_ticket = true; + return 0; +} + int CephxServiceHandler::handle_request( bufferlist::const_iterator& indata, size_t connection_secret_required_len, bufferlist *result_bl, - uint64_t *global_id, AuthCapsInfo *caps, CryptoKey *psession_key, std::string *pconnection_secret) @@ -133,23 +210,28 @@ ret = -EACCES; break; } + CephXServiceTicketInfo old_ticket_info; + ret = verify_old_ticket(req, old_ticket_info, should_enc_ticket); + if (ret) { + ldout(cct, 0) << " could not verify old ticket" << dendl; + break; + } - if (cephx_decode_ticket(cct, key_server, CEPH_ENTITY_TYPE_AUTH, - req.old_ticket, old_ticket_info)) { - *global_id = old_ticket_info.ticket.global_id; - ldout(cct, 10) << "decoded old_ticket with global_id=" << *global_id - << dendl; - should_enc_ticket = true; + double ttl; + if (!key_server->get_service_secret(CEPH_ENTITY_TYPE_AUTH, + info.service_secret, info.secret_id, 
+ ttl)) { + ldout(cct, 0) << " could not get service secret for auth subsystem" << dendl; + ret = -EIO; + break; } - ldout(cct,10) << __func__ << " auth ticket global_id " << *global_id - << dendl; - info.ticket.init_timestamps(ceph_clock_now(), - cct->_conf->auth_mon_ticket_ttl); + info.service_id = CEPH_ENTITY_TYPE_AUTH; info.ticket.name = entity_name; - info.ticket.global_id = *global_id; - info.validity += cct->_conf->auth_mon_ticket_ttl; + info.ticket.global_id = global_id; + info.ticket.init_timestamps(ceph_clock_now(), ttl); + info.validity.set_from_double(ttl); key_server->generate_secret(session_key); @@ -157,12 +239,6 @@ if (psession_key) { *psession_key = session_key; } - info.service_id = CEPH_ENTITY_TYPE_AUTH; - if (!key_server->get_service_secret(CEPH_ENTITY_TYPE_AUTH, info.service_secret, info.secret_id)) { - ldout(cct, 0) << " could not get service secret for auth subsystem" << dendl; - ret = -EIO; - break; - } vector info_vec; info_vec.push_back(info); @@ -208,11 +284,14 @@ } } encode(cbl, *result_bl); - // provite all of the other tickets at the same time + // provide requested service tickets at the same time vector info_vec; for (uint32_t service_id = 1; service_id <= req.other_keys; service_id <<= 1) { - if (req.other_keys & service_id) { + // skip CEPH_ENTITY_TYPE_AUTH: auth ticket is already encoded + // (possibly encrypted with the old session key) + if ((req.other_keys & service_id) && + service_id != CEPH_ENTITY_TYPE_AUTH) { ldout(cct, 10) << " adding key for service " << ceph_entity_type_name(service_id) << dendl; CephXSessionAuthInfo svc_info; @@ -220,7 +299,6 @@ service_id, info.ticket, svc_info); - svc_info.validity += cct->_conf->auth_service_ticket_ttl; info_vec.push_back(svc_info); } } @@ -272,7 +350,10 @@ int service_err = 0; for (uint32_t service_id = 1; service_id <= ticket_req.keys; service_id <<= 1) { - if (ticket_req.keys & service_id) { + // skip CEPH_ENTITY_TYPE_AUTH: auth ticket must be obtained with + // 
CEPHX_GET_AUTH_SESSION_KEY + if ((ticket_req.keys & service_id) && + service_id != CEPH_ENTITY_TYPE_AUTH) { ldout(cct, 10) << " adding key for service " << ceph_entity_type_name(service_id) << dendl; CephXSessionAuthInfo info; @@ -287,7 +368,6 @@ service_err = r; continue; } - info.validity += cct->_conf->auth_service_ticket_ttl; info_vec.push_back(info); ++found_services; } diff -Nru ceph-16.2.0/src/auth/cephx/CephxServiceHandler.h ceph-16.2.1/src/auth/cephx/CephxServiceHandler.h --- ceph-16.2.0/src/auth/cephx/CephxServiceHandler.h 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/auth/cephx/CephxServiceHandler.h 2021-04-19 13:50:07.000000000 +0000 @@ -19,6 +19,8 @@ #include "auth/Auth.h" class KeyServer; +struct CephXAuthenticate; +struct CephXServiceTicketInfo; class CephxServiceHandler : public AuthServiceHandler { KeyServer *key_server; @@ -29,22 +31,24 @@ : AuthServiceHandler(cct_), key_server(ks), server_challenge(0) {} ~CephxServiceHandler() override {} - int start_session(const EntityName& name, - size_t connection_secret_required_length, - ceph::buffer::list *result_bl, - AuthCapsInfo *caps, - CryptoKey *session_key, - std::string *connection_secret) override; int handle_request( ceph::buffer::list::const_iterator& indata, size_t connection_secret_required_length, ceph::buffer::list *result_bl, - uint64_t *global_id, AuthCapsInfo *caps, CryptoKey *session_key, std::string *connection_secret) override; - void build_cephx_response_header(int request_type, int status, ceph::buffer::list& bl); +private: + int do_start_session(bool is_new_global_id, + ceph::buffer::list *result_bl, + AuthCapsInfo *caps) override; + + int verify_old_ticket(const CephXAuthenticate& req, + CephXServiceTicketInfo& old_ticket_info, + bool& should_enc_ticket); + void build_cephx_response_header(int request_type, int status, + ceph::buffer::list& bl); }; #endif diff -Nru ceph-16.2.0/src/auth/krb/KrbClientHandler.hpp ceph-16.2.1/src/auth/krb/KrbClientHandler.hpp --- 
ceph-16.2.0/src/auth/krb/KrbClientHandler.hpp 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/auth/krb/KrbClientHandler.hpp 2021-04-19 13:50:07.000000000 +0000 @@ -39,7 +39,11 @@ reset(); } ~KrbClientHandler() override; - + + KrbClientHandler* clone() const override { + return new KrbClientHandler(*this); + } + int get_protocol() const override { return CEPH_AUTH_GSS; } void reset() override { m_gss_client_name = GSS_C_NO_NAME; diff -Nru ceph-16.2.0/src/auth/krb/KrbServiceHandler.cpp ceph-16.2.1/src/auth/krb/KrbServiceHandler.cpp --- ceph-16.2.0/src/auth/krb/KrbServiceHandler.cpp 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/auth/krb/KrbServiceHandler.cpp 2021-04-19 13:50:07.000000000 +0000 @@ -30,7 +30,6 @@ bufferlist::const_iterator& indata, size_t connection_secret_required_length, bufferlist *buff_list, - uint64_t *global_id, AuthCapsInfo *caps, CryptoKey *session_key, std::string *connection_secret) @@ -152,13 +151,10 @@ return result; } -int KrbServiceHandler::start_session( - const EntityName& name, - size_t connection_secret_required_length, +int KrbServiceHandler::do_start_session( + bool is_new_global_id, bufferlist *buff_list, - AuthCapsInfo *caps, - CryptoKey *session_key, - std::string *connection_secret) + AuthCapsInfo *caps) { gss_buffer_desc gss_buffer_in = {0, nullptr}; gss_OID gss_object_id = GSS_C_NT_HOSTBASED_SERVICE; @@ -170,7 +166,6 @@ gss_buffer_in.length = gss_service_name.length(); gss_buffer_in.value = (const_cast(gss_service_name.c_str())); - entity_name = name; gss_major_status = gss_import_name(&gss_minor_status, &gss_buffer_in, diff -Nru ceph-16.2.0/src/auth/krb/KrbServiceHandler.hpp ceph-16.2.1/src/auth/krb/KrbServiceHandler.hpp --- ceph-16.2.0/src/auth/krb/KrbServiceHandler.hpp 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/auth/krb/KrbServiceHandler.hpp 2021-04-19 13:50:07.000000000 +0000 @@ -40,19 +40,15 @@ int handle_request(bufferlist::const_iterator& indata, size_t connection_secret_required_length, 
bufferlist *buff_list, - uint64_t *global_id, AuthCapsInfo *caps, CryptoKey *session_key, std::string *connection_secret) override; - int start_session(const EntityName& name, - size_t connection_secret_required_length, - bufferlist *buff_list, - AuthCapsInfo *caps, - CryptoKey *session_key, - std::string *connection_secret) override; - private: + int do_start_session(bool is_new_global_id, + ceph::buffer::list *buff_list, + AuthCapsInfo *caps) override; + gss_buffer_desc m_gss_buffer_out; gss_cred_id_t m_gss_credentials; gss_ctx_id_t m_gss_sec_ctx; diff -Nru ceph-16.2.0/src/auth/none/AuthNoneClientHandler.h ceph-16.2.1/src/auth/none/AuthNoneClientHandler.h --- ceph-16.2.0/src/auth/none/AuthNoneClientHandler.h 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/auth/none/AuthNoneClientHandler.h 2021-04-19 13:50:07.000000000 +0000 @@ -26,6 +26,10 @@ AuthNoneClientHandler(CephContext *cct_) : AuthClientHandler(cct_) {} + AuthNoneClientHandler* clone() const override { + return new AuthNoneClientHandler(*this); + } + void reset() override { } void prepare_build_request() override {} diff -Nru ceph-16.2.0/src/auth/none/AuthNoneServiceHandler.h ceph-16.2.1/src/auth/none/AuthNoneServiceHandler.h --- ceph-16.2.0/src/auth/none/AuthNoneServiceHandler.h 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/auth/none/AuthNoneServiceHandler.h 2021-04-19 13:50:07.000000000 +0000 @@ -25,27 +25,21 @@ : AuthServiceHandler(cct_) {} ~AuthNoneServiceHandler() override {} - int start_session(const EntityName& name, - size_t connection_secret_required_length, - ceph::buffer::list *result_bl, - AuthCapsInfo *caps, - CryptoKey *session_key, - std::string *connection_secret) override { - entity_name = name; - caps->allow_all = true; - return 1; - } int handle_request(ceph::buffer::list::const_iterator& indata, size_t connection_secret_required_length, ceph::buffer::list *result_bl, - uint64_t *global_id, AuthCapsInfo *caps, CryptoKey *session_key, std::string *connection_secret) 
override { return 0; } - void build_cephx_response_header(int request_type, int status, - ceph::buffer::list& bl) { + +private: + int do_start_session(bool is_new_global_id, + ceph::buffer::list *result_bl, + AuthCapsInfo *caps) override { + caps->allow_all = true; + return 1; } }; diff -Nru ceph-16.2.0/src/cephadm/cephadm ceph-16.2.1/src/cephadm/cephadm --- ceph-16.2.0/src/cephadm/cephadm 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/cephadm/cephadm 2021-04-19 13:50:07.000000000 +0000 @@ -3755,8 +3755,15 @@ if not cp.has_section('global'): cp.add_section('global') cp.set('global', 'fsid', fsid) - cp.set('global', 'mon host', mon_addr) + cp.set('global', 'mon_host', mon_addr) cp.set('global', 'container_image', image) + if not cp.has_section('mon'): + cp.add_section('mon') + if ( + not cp.has_option('mon', 'auth_allow_insecure_global_id_reclaim') + and not cp.has_option('mon', 'auth allow insecure global id reclaim') + ): + cp.set('mon', 'auth_allow_insecure_global_id_reclaim', 'false') cpf = StringIO() cp.write(cpf) config = cpf.getvalue() diff -Nru ceph-16.2.0/src/common/legacy_config_opts.h ceph-16.2.1/src/common/legacy_config_opts.h --- ceph-16.2.0/src/common/legacy_config_opts.h 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/common/legacy_config_opts.h 2021-04-19 13:50:07.000000000 +0000 @@ -329,6 +329,8 @@ OPTION(cephx_sign_messages, OPT_BOOL) // Default to signing session messages if supported OPTION(auth_mon_ticket_ttl, OPT_DOUBLE) OPTION(auth_service_ticket_ttl, OPT_DOUBLE) +OPTION(auth_allow_insecure_global_id_reclaim, OPT_BOOL) +OPTION(auth_expose_insecure_global_id_reclaim, OPT_BOOL) OPTION(auth_debug, OPT_BOOL) // if true, assert when weird things happen OPTION(mon_client_hunt_parallel, OPT_U32) // how many mons to try to connect to in parallel during hunt OPTION(mon_client_hunt_interval, OPT_DOUBLE) // try new mon every N seconds until we connect diff -Nru ceph-16.2.0/src/common/options.cc ceph-16.2.1/src/common/options.cc --- 
ceph-16.2.0/src/common/options.cc 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/common/options.cc 2021-04-19 13:50:07.000000000 +0000 @@ -1780,6 +1780,22 @@ .add_service("mon") .set_description("time before OSDs who do not report to the mons are marked down (seconds)"), + Option("mon_warn_on_insecure_global_id_reclaim", Option::TYPE_BOOL, Option::LEVEL_ADVANCED) + .set_default(true) + .add_service("mon") + .set_description("issue AUTH_INSECURE_GLOBAL_ID_RECLAIM health warning if any connected clients are insecurely reclaiming global_id") + .add_see_also("mon_warn_on_insecure_global_id_reclaim_allowed") + .add_see_also("auth_allow_insecure_global_id_reclaim") + .add_see_also("auth_expose_insecure_global_id_reclaim"), + + Option("mon_warn_on_insecure_global_id_reclaim_allowed", Option::TYPE_BOOL, Option::LEVEL_ADVANCED) + .set_default(true) + .add_service("mon") + .set_description("issue AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED health warning if insecure global_id reclaim is allowed") + .add_see_also("mon_warn_on_insecure_global_id_reclaim") + .add_see_also("auth_allow_insecure_global_id_reclaim") + .add_see_also("auth_expose_insecure_global_id_reclaim"), + Option("mon_warn_on_msgr2_not_enabled", Option::TYPE_BOOL, Option::LEVEL_ADVANCED) .set_default(true) .add_service("mon") @@ -2361,13 +2377,29 @@ .set_description(""), Option("auth_mon_ticket_ttl", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED) - .set_default(12_hr) + .set_default(72_hr) .set_description(""), Option("auth_service_ticket_ttl", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED) .set_default(1_hr) .set_description(""), + Option("auth_allow_insecure_global_id_reclaim", Option::TYPE_BOOL, Option::LEVEL_ADVANCED) + .set_default(true) + .set_description("Allow reclaiming global_id without presenting a valid ticket proving previous possession of that global_id") + .set_long_description("Allowing unauthorized global_id (re)use poses a security risk. 
Unfortunately, older clients may omit their ticket on reconnects and therefore rely on this being allowed for preserving their global_id for the lifetime of the client instance. Setting this value to false would immediately prevent new connections from those clients (assuming auth_expose_insecure_global_id_reclaim set to true) and eventually break existing sessions as well (regardless of auth_expose_insecure_global_id_reclaim setting).") + .add_see_also("mon_warn_on_insecure_global_id_reclaim") + .add_see_also("mon_warn_on_insecure_global_id_reclaim_allowed") + .add_see_also("auth_expose_insecure_global_id_reclaim"), + + Option("auth_expose_insecure_global_id_reclaim", Option::TYPE_BOOL, Option::LEVEL_ADVANCED) + .set_default(true) + .set_description("Force older clients that may omit their ticket on reconnects to reconnect as part of establishing a session") + .set_long_description("In permissive mode (auth_allow_insecure_global_id_reclaim set to true), this helps with identifying clients that are not patched. 
In enforcing mode (auth_allow_insecure_global_id_reclaim set to false), this is a fail-fast mechanism: don't establish a session that will almost inevitably be broken later.") + .add_see_also("mon_warn_on_insecure_global_id_reclaim") + .add_see_also("mon_warn_on_insecure_global_id_reclaim_allowed") + .add_see_also("auth_allow_insecure_global_id_reclaim"), + Option("auth_debug", Option::TYPE_BOOL, Option::LEVEL_DEV) .set_default(false) .set_description(""), diff -Nru ceph-16.2.0/src/.git_version ceph-16.2.1/src/.git_version --- ceph-16.2.0/src/.git_version 2021-03-30 21:16:27.000000000 +0000 +++ ceph-16.2.1/src/.git_version 2021-04-19 13:52:50.000000000 +0000 @@ -1,2 +1,2 @@ -0c2054e95bcd9b30fdd908a79ac1d8bbc3394442 -16.2.0 +afb9061ab4117f798c858c741efa6390e48ccf10 +16.2.1 diff -Nru ceph-16.2.0/src/mon/AuthMonitor.cc ceph-16.2.1/src/mon/AuthMonitor.cc --- ceph-16.2.0/src/mon/AuthMonitor.cc 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/mon/AuthMonitor.cc 2021-04-19 13:50:07.000000000 +0000 @@ -615,6 +615,7 @@ bool start = false; bool finished = false; EntityName entity_name; + bool is_new_global_id = false; // set up handler? 
if (m->protocol == 0 && !s->auth_handler) { @@ -734,23 +735,23 @@ ceph_assert(!paxos_writable); return false; } + is_new_global_id = true; } try { if (start) { // new session ret = s->auth_handler->start_session(entity_name, - 0, // no connection_secret needed + s->con->peer_global_id, + is_new_global_id, &response_bl, - &s->con->peer_caps_info, - nullptr, nullptr); + &s->con->peer_caps_info); } else { // request ret = s->auth_handler->handle_request( indata, 0, // no connection_secret needed &response_bl, - &s->con->peer_global_id, &s->con->peer_caps_info, nullptr, nullptr); } diff -Nru ceph-16.2.0/src/mon/HealthMonitor.cc ceph-16.2.1/src/mon/HealthMonitor.cc --- ceph-16.2.0/src/mon/HealthMonitor.cc 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/mon/HealthMonitor.cc 2021-04-19 13:50:07.000000000 +0000 @@ -571,6 +571,7 @@ { dout(20) << __func__ << dendl; bool changed = false; + const auto max = g_conf().get_val("mon_health_max_detail"); // snapshot of usage DataStats stats; @@ -640,6 +641,44 @@ } } + // AUTH_INSECURE_GLOBAL_ID_RECLAIM + if (g_conf().get_val("mon_warn_on_insecure_global_id_reclaim") && + g_conf().get_val("auth_allow_insecure_global_id_reclaim")) { + // Warn if there are any clients that are insecurely renewing their global_id + std::lock_guard l(mon.session_map_lock); + list detail; + for (auto p = mon.session_map.sessions.begin(); + p != mon.session_map.sessions.end(); + ++p) { + if ((*p)->global_id_status == global_id_status_t::RECLAIM_INSECURE) { + ostringstream ds; + ds << (*p)->entity_name << " at " << (*p)->addrs + << " is using insecure global_id reclaim"; + detail.push_back(ds.str()); + if (detail.size() >= max) { + detail.push_back("..."); + break; + } + } + } + if (!detail.empty()) { + ostringstream ss; + ss << "client%plurals% %isorare% using insecure global_id reclaim"; + auto& d = next.add("AUTH_INSECURE_GLOBAL_ID_RECLAIM", HEALTH_WARN, ss.str(), + detail.size()); + d.detail.swap(detail); + } + } + // 
AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED + if (g_conf().get_val("mon_warn_on_insecure_global_id_reclaim_allowed") && + g_conf().get_val("auth_allow_insecure_global_id_reclaim")) { + ostringstream ss, ds; + ss << "mon%plurals% %isorare% allowing insecure global_id reclaim"; + auto& d = next.add("AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED", HEALTH_WARN, ss.str(), 1); + ds << "mon." << mon.name << " has auth_allow_insecure_global_id_reclaim set to true"; + d.detail.push_back(ds.str()); + } + auto p = quorum_checks.find(mon.rank); if (p == quorum_checks.end()) { if (next.empty()) { diff -Nru ceph-16.2.0/src/mon/MonClient.cc ceph-16.2.1/src/mon/MonClient.cc --- ceph-16.2.0/src/mon/MonClient.cc 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/mon/MonClient.cc 2021-04-19 13:50:07.000000000 +0000 @@ -175,8 +175,11 @@ } while ((!bootstrap_config || monmap.get_epoch() == 0) && r == 0) { ldout(cct,20) << __func__ << " waiting for monmap|config" << dendl; - map_cond.wait_for(l, ceph::make_timespan( - cct->_conf->mon_client_hunt_interval)); + auto status = map_cond.wait_for(l, ceph::make_timespan( + cct->_conf->mon_client_hunt_interval)); + if (status == std::cv_status::timeout) { + r = -ETIMEDOUT; + } } if (bootstrap_config) { @@ -540,7 +543,11 @@ active_con.reset(); pending_cons.clear(); + auth.reset(); + global_id = 0; + authenticate_err = 0; + authenticated = false; monc_lock.unlock(); @@ -718,9 +725,9 @@ _start_hunting(); if (rank >= 0) { - _add_conn(rank, global_id); + _add_conn(rank); } else { - _add_conns(global_id); + _add_conns(); } // throw out old queued messages @@ -742,11 +749,14 @@ } } -void MonClient::_add_conn(unsigned rank, uint64_t global_id) +void MonClient::_add_conn(unsigned rank) { auto peer = monmap.get_addrs(rank); auto conn = messenger->connect_to_mon(peer); MonConnection mc(cct, conn, global_id, &auth_registry); + if (auth) { + mc.get_auth().reset(auth->clone()); + } pending_cons.insert(std::make_pair(peer, std::move(mc))); ldout(cct, 10) << 
"picked mon." << monmap.get_name(rank) << " con " << conn @@ -754,7 +764,7 @@ << dendl; } -void MonClient::_add_conns(uint64_t global_id) +void MonClient::_add_conns() { // collect the next batch of candidates who are listed right next to the ones // already tried @@ -807,7 +817,7 @@ n = ranks.size(); } for (unsigned i = 0; i < n; i++) { - _add_conn(ranks[i], global_id); + _add_conn(ranks[i]); tried.insert(ranks[i]); } } @@ -907,7 +917,7 @@ _resend_mon_commands(); send_log(true); if (active_con) { - std::swap(auth, active_con->get_auth()); + auth = std::move(active_con->get_auth()); if (global_id && global_id != active_con->get_global_id()) { lderr(cct) << __func__ << " global_id changed from " << global_id << " to " << active_con->get_global_id() << dendl; @@ -1711,9 +1721,6 @@ return -EACCES; } - if (auth) { - auth.reset(); - } int r = _init_auth(*method, entity_name, want_keys, keyring, true); ceph_assert(r == 0); @@ -1834,12 +1841,6 @@ uint32_t want_keys, RotatingKeyRing* keyring) { - if (auth && (int)m->protocol == auth->get_protocol()) { - // good, negotiation completed - auth->reset(); - return 0; - } - int r = _init_auth(m->protocol, entity_name, want_keys, keyring, false); if (r == -ENOTSUP) { if (m->result == -ENOTSUP) { @@ -1858,9 +1859,15 @@ RotatingKeyRing* keyring, bool msgr2) { - ldout(cct,10) << __func__ << " method " << method << dendl; - auth.reset( - AuthClientHandler::create(cct, method, keyring)); + ldout(cct, 10) << __func__ << " method " << method << dendl; + if (auth && auth->get_protocol() == (int)method) { + ldout(cct, 10) << __func__ << " already have auth, reseting" << dendl; + auth->reset(); + return 0; + } + + ldout(cct, 10) << __func__ << " creating new auth" << dendl; + auth.reset(AuthClientHandler::create(cct, method, keyring)); if (!auth) { ldout(cct, 10) << " no handler for protocol " << method << dendl; return -ENOTSUP; diff -Nru ceph-16.2.0/src/mon/MonClient.h ceph-16.2.1/src/mon/MonClient.h --- ceph-16.2.0/src/mon/MonClient.h 
2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/mon/MonClient.h 2021-04-19 13:50:07.000000000 +0000 @@ -351,9 +351,9 @@ void _finish_hunting(int auth_err); void _finish_auth(int auth_err); void _reopen_session(int rank = -1); - void _add_conn(unsigned rank, uint64_t global_id); + void _add_conn(unsigned rank); + void _add_conns(); void _un_backoff(); - void _add_conns(uint64_t global_id); void _send_mon_message(MessageRef m); std::map::iterator _find_pending_con( diff -Nru ceph-16.2.0/src/mon/Monitor.cc ceph-16.2.1/src/mon/Monitor.cc --- ceph-16.2.0/src/mon/Monitor.cc 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/mon/Monitor.cc 2021-04-19 13:50:07.000000000 +0000 @@ -4416,9 +4416,13 @@ if (s->auth_handler) { s->entity_name = s->auth_handler->get_entity_name(); + s->global_id = s->auth_handler->get_global_id(); + s->global_id_status = s->auth_handler->get_global_id_status(); } - dout(20) << " entity " << s->entity_name - << " caps " << s->caps.get_str() << dendl; + dout(20) << " entity_name " << s->entity_name + << " global_id " << s->global_id + << " (" << s->global_id_status + << ") caps " << s->caps.get_str() << dendl; if (!session_stretch_allowed(s, op)) { return; @@ -4472,6 +4476,34 @@ return; } + // global_id_status == NONE: all sessions for auth_none and krb, + // mon <-> mon sessions (including proxied sessions) for cephx + ceph_assert(s->global_id_status == global_id_status_t::NONE || + s->global_id_status == global_id_status_t::NEW_OK || + s->global_id_status == global_id_status_t::NEW_NOT_EXPOSED || + s->global_id_status == global_id_status_t::RECLAIM_OK || + s->global_id_status == global_id_status_t::RECLAIM_INSECURE); + + // let mon_getmap through for "ping" (which doesn't reconnect) + // and "tell" (which reconnects but doesn't attempt to preserve + // its global_id and stays in NEW_NOT_EXPOSED, retrying until + // ->send_attempts reaches 0) + if (cct->_conf->auth_expose_insecure_global_id_reclaim && + s->global_id_status == 
global_id_status_t::NEW_NOT_EXPOSED && + op->get_req()->get_type() != CEPH_MSG_MON_GET_MAP) { + dout(5) << __func__ << " " << op->get_req()->get_source_inst() + << " may omit old_ticket on reconnects, discarding " + << *op->get_req() << " and forcing reconnect" << dendl; + ceph_assert(s->con && !s->proxy_con); + s->con->mark_down(); + { + std::lock_guard l(session_map_lock); + remove_session(s); + } + op->mark_zap(); + return; + } + switch (op->get_req()->get_type()) { case CEPH_MSG_MON_GET_MAP: handle_mon_get_map(op); @@ -6167,7 +6199,7 @@ } ret = key_server.build_session_auth_info( - service_id, auth_ticket_info.ticket, info, secret, (uint64_t)-1); + service_id, auth_ticket_info.ticket, secret, (uint64_t)-1, info); if (ret < 0) { dout(0) << __func__ << " failed to build mon session_auth_info " << cpp_strerror(ret) << dendl; @@ -6341,14 +6373,14 @@ // are supported by the client if we require it. for msgr2 that // is not necessary. + bool is_new_global_id = false; if (!con->peer_global_id) { con->peer_global_id = authmon()->_assign_global_id(); if (!con->peer_global_id) { dout(1) << __func__ << " failed to assign global_id" << dendl; return -EBUSY; } - dout(10) << __func__ << " assigned global_id " << con->peer_global_id - << dendl; + is_new_global_id = true; } // set up partial session @@ -6358,11 +6390,10 @@ r = s->auth_handler->start_session( entity_name, - auth_meta->get_connection_secret_length(), + con->peer_global_id, + is_new_global_id, reply, - &con->peer_caps_info, - &auth_meta->session_key, - &auth_meta->connection_secret); + &con->peer_caps_info); } else { priv = con->get_priv(); if (!priv) { @@ -6375,7 +6406,6 @@ p, auth_meta->get_connection_secret_length(), reply, - &con->peer_global_id, &con->peer_caps_info, &auth_meta->session_key, &auth_meta->connection_secret); diff -Nru ceph-16.2.0/src/mon/Session.h ceph-16.2.1/src/mon/Session.h --- ceph-16.2.0/src/mon/Session.h 2021-03-30 21:13:28.000000000 +0000 +++ ceph-16.2.1/src/mon/Session.h 2021-04-19 
13:50:07.000000000 +0000 @@ -65,6 +65,8 @@ AuthServiceHandler *auth_handler = nullptr; EntityName entity_name; + uint64_t global_id = 0; + global_id_status_t global_id_status = global_id_status_t::NONE; ConnectionRef proxy_con; uint64_t proxy_tid = 0; @@ -132,6 +134,8 @@ f->dump_bool("open", !closed); f->dump_object("caps", caps); f->dump_bool("authenticated", authenticated); + f->dump_unsigned("global_id", global_id); + f->dump_stream("global_id_status") << global_id_status; f->dump_unsigned("osd_epoch", osd_epoch); f->dump_string("remote_host", remote_host); } diff -Nru ceph-16.2.0/src/test/debian-jessie/debian/ceph-base.install ceph-16.2.1/src/test/debian-jessie/debian/ceph-base.install --- ceph-16.2.0/src/test/debian-jessie/debian/ceph-base.install 2021-03-26 09:38:35.000000000 +0000 +++ ceph-16.2.1/src/test/debian-jessie/debian/ceph-base.install 2021-05-04 18:20:11.000000000 +0000 @@ -12,7 +12,6 @@ usr/sbin/ceph-create-keys usr/share/doc/ceph/sample.ceph.conf usr/share/man/man8/ceph-create-keys.8 -usr/share/man/man8/ceph-deploy.8 usr/share/man/man8/ceph-kvstore-tool.8 usr/share/man/man8/ceph-run.8 usr/share/man/man8/crushtool.8 diff -Nru ceph-16.2.0/src/test/debian-jessie/debian/changelog ceph-16.2.1/src/test/debian-jessie/debian/changelog --- ceph-16.2.0/src/test/debian-jessie/debian/changelog 2021-04-10 07:14:20.000000000 +0000 +++ ceph-16.2.1/src/test/debian-jessie/debian/changelog 2021-05-04 18:21:24.000000000 +0000 @@ -1,3 +1,20 @@ +ceph (16.2.1-0ubuntu1) impish; urgency=medium + + [ Chris MacNaughton ] + * d/ceph-base.install: Remove ceph-deploy man page installation + (LP: #1892448). + + [ James Page ] + * SECURITY UPDATE: New upstream point release (LP: #1925322): + - CVE-2021-20288 + * d/rules: remove temporary build objects after install to avoid + running out of disk space during package builds. 
+ * d/p/bug1925347.patch: Cherry pick fix to revert ProtectClock + permissions change in systemd configurations which prevents the + ceph-osd process from starting (LP: #1925347). + + -- James Page Tue, 04 May 2021 19:21:24 +0100 + ceph (16.2.0-0ubuntu1) hirsute; urgency=medium [ Chris MacNaughton ] diff -Nru ceph-16.2.0/src/test/debian-jessie/debian/gbp.conf ceph-16.2.1/src/test/debian-jessie/debian/gbp.conf --- ceph-16.2.0/src/test/debian-jessie/debian/gbp.conf 2021-04-10 07:12:54.000000000 +0000 +++ ceph-16.2.1/src/test/debian-jessie/debian/gbp.conf 2021-05-04 18:21:08.000000000 +0000 @@ -1,3 +1,3 @@ [DEFAULT] -debian-branch = ubuntu/hirsute +debian-branch = ubuntu/impish pristine-tar = True diff -Nru ceph-16.2.0/src/test/debian-jessie/debian/patches/bug1925347.patch ceph-16.2.1/src/test/debian-jessie/debian/patches/bug1925347.patch --- ceph-16.2.0/src/test/debian-jessie/debian/patches/bug1925347.patch 1970-01-01 00:00:00.000000000 +0000 +++ ceph-16.2.1/src/test/debian-jessie/debian/patches/bug1925347.patch 2021-05-04 18:20:11.000000000 +0000 @@ -0,0 +1,154 @@ +From 833a53cfdfa30559d7608e03d6da432260e286a8 Mon Sep 17 00:00:00 2001 +From: Wong Hoi Sing Edison +Date: Wed, 14 Apr 2021 15:36:17 +0800 +Subject: [PATCH] systemd: remove `ProtectClock=true` for `ceph-osd@.service` + +Ceph 16.2.0 Pacific by https://github.com/ceph/ceph/commit/9a84d5a introduce following new systemd restriction: + + ProtectClock=true + ProtectHostname=true + ProtectKernelLogs=true + RestrictSUIDSGID=true + +BTW, `ceph-osd@.service` failed with `ProtectClock=true` unexpectly, also see: + + - + - + +This PR intruduce: + + - Remove `ProtectClock=true` for our systemd service templates + +Fixes: https://tracker.ceph.com/issues/50347 +Signed-off-by: Wong Hoi Sing Edison +(cherry picked from commit 85bc551b179d940a50cbdfd0c20848e3187c70a6) +--- + systemd/ceph-fuse@.service.in | 1 - + systemd/ceph-immutable-object-cache@.service.in | 1 - + systemd/ceph-mds@.service.in | 1 - + 
systemd/ceph-mgr@.service.in | 1 - + systemd/ceph-mon@.service.in | 1 - + systemd/ceph-osd@.service.in | 1 - + systemd/ceph-radosgw@.service.in | 1 - + systemd/ceph-rbd-mirror@.service.in | 1 - + systemd/cephfs-mirror@.service.in | 3 +-- + 9 files changed, 1 insertion(+), 10 deletions(-) + +diff --git a/systemd/ceph-fuse@.service.in b/systemd/ceph-fuse@.service.in +index 1ea4b17675a..9c12c9ba444 100644 +--- a/systemd/ceph-fuse@.service.in ++++ b/systemd/ceph-fuse@.service.in +@@ -14,7 +14,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + # ceph-fuse requires access to /dev fuse device + PrivateDevices=no +-ProtectClock=true + ProtectControlGroups=true + ProtectHostname=true + ProtectKernelLogs=true +diff --git a/systemd/ceph-immutable-object-cache@.service.in b/systemd/ceph-immutable-object-cache@.service.in +index f5782487f9e..62ff8dbd272 100644 +--- a/systemd/ceph-immutable-object-cache@.service.in ++++ b/systemd/ceph-immutable-object-cache@.service.in +@@ -14,7 +14,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-mds@.service.in b/systemd/ceph-mds@.service.in +index 2884f587f97..afa36702f9c 100644 +--- a/systemd/ceph-mds@.service.in ++++ b/systemd/ceph-mds@.service.in +@@ -17,7 +17,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-mgr@.service.in b/systemd/ceph-mgr@.service.in +index 1ee28285209..8fadc4746b3 100644 +--- a/systemd/ceph-mgr@.service.in ++++ b/systemd/ceph-mgr@.service.in +@@ -16,7 +16,6 @@ LockPersonality=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-mon@.service.in 
b/systemd/ceph-mon@.service.in +index 994cdfd2869..b7c92f278e3 100644 +--- a/systemd/ceph-mon@.service.in ++++ b/systemd/ceph-mon@.service.in +@@ -22,7 +22,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=false + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-osd@.service.in b/systemd/ceph-osd@.service.in +index 4981417d620..046500efb66 100644 +--- a/systemd/ceph-osd@.service.in ++++ b/systemd/ceph-osd@.service.in +@@ -18,7 +18,6 @@ MemoryDenyWriteExecute=true + # Need NewPrivileges via `sudo smartctl` + NoNewPrivileges=false + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-radosgw@.service.in b/systemd/ceph-radosgw@.service.in +index cfff60c18b8..b7474705506 100644 +--- a/systemd/ceph-radosgw@.service.in ++++ b/systemd/ceph-radosgw@.service.in +@@ -16,7 +16,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-rbd-mirror@.service.in b/systemd/ceph-rbd-mirror@.service.in +index fe49f11116e..1057892dc99 100644 +--- a/systemd/ceph-rbd-mirror@.service.in ++++ b/systemd/ceph-rbd-mirror@.service.in +@@ -16,7 +16,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/cephfs-mirror@.service.in b/systemd/cephfs-mirror@.service.in +index a97d6ad8b57..bed9d195302 100644 +--- a/systemd/cephfs-mirror@.service.in ++++ b/systemd/cephfs-mirror@.service.in +@@ -15,7 +15,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +@@ -30,4 +29,4 @@ 
StartLimitInterval=30min + TasksMax=infinity + + [Install] +-WantedBy=cephfs-mirror.target +\ No newline at end of file ++WantedBy=cephfs-mirror.target +-- +2.30.2 + diff -Nru ceph-16.2.0/src/test/debian-jessie/debian/patches/series ceph-16.2.1/src/test/debian-jessie/debian/patches/series --- ceph-16.2.0/src/test/debian-jessie/debian/patches/series 2021-04-10 07:12:54.000000000 +0000 +++ ceph-16.2.1/src/test/debian-jessie/debian/patches/series 2021-05-04 18:20:11.000000000 +0000 @@ -14,3 +14,5 @@ # text relocation fix AARCH64 bug1917414.patch bug1914584.patch +# systemd permissions +bug1925347.patch diff -Nru ceph-16.2.0/src/test/debian-jessie/debian/rules ceph-16.2.1/src/test/debian-jessie/debian/rules --- ceph-16.2.0/src/test/debian-jessie/debian/rules 2021-04-10 07:12:54.000000000 +0000 +++ ceph-16.2.1/src/test/debian-jessie/debian/rules 2021-05-04 18:20:11.000000000 +0000 @@ -125,8 +125,8 @@ # from the package install - package ships unversioned modules. rm -f $(CURDIR)/debian/tmp/usr/lib/*/ceph/erasure-code/libec_*.so.* find $(CURDIR)/debian/tmp/usr/lib/*/ceph/erasure-code -type l -delete || : - # running out of disk space on riscv64 - rm -rf $(CURDIR)/obj-riscv64-linux-gnu + # avoid running out of disk space + rm -rf $(CURDIR)/obj-*-linux-gnu # doc/changelog is a directory, which confuses dh_installchangelogs diff -Nru ceph-16.2.0/src/test/ubuntu-16.04/debian/ceph-base.install ceph-16.2.1/src/test/ubuntu-16.04/debian/ceph-base.install --- ceph-16.2.0/src/test/ubuntu-16.04/debian/ceph-base.install 2021-03-26 09:38:35.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-16.04/debian/ceph-base.install 2021-05-04 18:20:11.000000000 +0000 @@ -12,7 +12,6 @@ usr/sbin/ceph-create-keys usr/share/doc/ceph/sample.ceph.conf usr/share/man/man8/ceph-create-keys.8 -usr/share/man/man8/ceph-deploy.8 usr/share/man/man8/ceph-kvstore-tool.8 usr/share/man/man8/ceph-run.8 usr/share/man/man8/crushtool.8 diff -Nru ceph-16.2.0/src/test/ubuntu-16.04/debian/changelog 
ceph-16.2.1/src/test/ubuntu-16.04/debian/changelog --- ceph-16.2.0/src/test/ubuntu-16.04/debian/changelog 2021-04-10 07:14:20.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-16.04/debian/changelog 2021-05-04 18:21:24.000000000 +0000 @@ -1,3 +1,20 @@ +ceph (16.2.1-0ubuntu1) impish; urgency=medium + + [ Chris MacNaughton ] + * d/ceph-base.install: Remove ceph-deploy man page installation + (LP: #1892448). + + [ James Page ] + * SECURITY UPDATE: New upstream point release (LP: #1925322): + - CVE-2021-20288 + * d/rules: remove temporary build objects after install to avoid + running out of disk space during package builds. + * d/p/bug1925347.patch: Cherry pick fix to revert ProtectClock + permissions change in systemd configurations which prevents the + ceph-osd process from starting (LP: #1925347). + + -- James Page Tue, 04 May 2021 19:21:24 +0100 + ceph (16.2.0-0ubuntu1) hirsute; urgency=medium [ Chris MacNaughton ] diff -Nru ceph-16.2.0/src/test/ubuntu-16.04/debian/gbp.conf ceph-16.2.1/src/test/ubuntu-16.04/debian/gbp.conf --- ceph-16.2.0/src/test/ubuntu-16.04/debian/gbp.conf 2021-04-10 07:12:54.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-16.04/debian/gbp.conf 2021-05-04 18:21:08.000000000 +0000 @@ -1,3 +1,3 @@ [DEFAULT] -debian-branch = ubuntu/hirsute +debian-branch = ubuntu/impish pristine-tar = True diff -Nru ceph-16.2.0/src/test/ubuntu-16.04/debian/patches/bug1925347.patch ceph-16.2.1/src/test/ubuntu-16.04/debian/patches/bug1925347.patch --- ceph-16.2.0/src/test/ubuntu-16.04/debian/patches/bug1925347.patch 1970-01-01 00:00:00.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-16.04/debian/patches/bug1925347.patch 2021-05-04 18:20:11.000000000 +0000 @@ -0,0 +1,154 @@ +From 833a53cfdfa30559d7608e03d6da432260e286a8 Mon Sep 17 00:00:00 2001 +From: Wong Hoi Sing Edison +Date: Wed, 14 Apr 2021 15:36:17 +0800 +Subject: [PATCH] systemd: remove `ProtectClock=true` for `ceph-osd@.service` + +Ceph 16.2.0 Pacific by https://github.com/ceph/ceph/commit/9a84d5a introduce 
following new systemd restriction: + + ProtectClock=true + ProtectHostname=true + ProtectKernelLogs=true + RestrictSUIDSGID=true + +BTW, `ceph-osd@.service` failed with `ProtectClock=true` unexpectly, also see: + + - + - + +This PR intruduce: + + - Remove `ProtectClock=true` for our systemd service templates + +Fixes: https://tracker.ceph.com/issues/50347 +Signed-off-by: Wong Hoi Sing Edison +(cherry picked from commit 85bc551b179d940a50cbdfd0c20848e3187c70a6) +--- + systemd/ceph-fuse@.service.in | 1 - + systemd/ceph-immutable-object-cache@.service.in | 1 - + systemd/ceph-mds@.service.in | 1 - + systemd/ceph-mgr@.service.in | 1 - + systemd/ceph-mon@.service.in | 1 - + systemd/ceph-osd@.service.in | 1 - + systemd/ceph-radosgw@.service.in | 1 - + systemd/ceph-rbd-mirror@.service.in | 1 - + systemd/cephfs-mirror@.service.in | 3 +-- + 9 files changed, 1 insertion(+), 10 deletions(-) + +diff --git a/systemd/ceph-fuse@.service.in b/systemd/ceph-fuse@.service.in +index 1ea4b17675a..9c12c9ba444 100644 +--- a/systemd/ceph-fuse@.service.in ++++ b/systemd/ceph-fuse@.service.in +@@ -14,7 +14,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + # ceph-fuse requires access to /dev fuse device + PrivateDevices=no +-ProtectClock=true + ProtectControlGroups=true + ProtectHostname=true + ProtectKernelLogs=true +diff --git a/systemd/ceph-immutable-object-cache@.service.in b/systemd/ceph-immutable-object-cache@.service.in +index f5782487f9e..62ff8dbd272 100644 +--- a/systemd/ceph-immutable-object-cache@.service.in ++++ b/systemd/ceph-immutable-object-cache@.service.in +@@ -14,7 +14,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-mds@.service.in b/systemd/ceph-mds@.service.in +index 2884f587f97..afa36702f9c 100644 +--- a/systemd/ceph-mds@.service.in ++++ b/systemd/ceph-mds@.service.in +@@ -17,7 +17,6 @@ 
MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-mgr@.service.in b/systemd/ceph-mgr@.service.in +index 1ee28285209..8fadc4746b3 100644 +--- a/systemd/ceph-mgr@.service.in ++++ b/systemd/ceph-mgr@.service.in +@@ -16,7 +16,6 @@ LockPersonality=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-mon@.service.in b/systemd/ceph-mon@.service.in +index 994cdfd2869..b7c92f278e3 100644 +--- a/systemd/ceph-mon@.service.in ++++ b/systemd/ceph-mon@.service.in +@@ -22,7 +22,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=false + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-osd@.service.in b/systemd/ceph-osd@.service.in +index 4981417d620..046500efb66 100644 +--- a/systemd/ceph-osd@.service.in ++++ b/systemd/ceph-osd@.service.in +@@ -18,7 +18,6 @@ MemoryDenyWriteExecute=true + # Need NewPrivileges via `sudo smartctl` + NoNewPrivileges=false + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-radosgw@.service.in b/systemd/ceph-radosgw@.service.in +index cfff60c18b8..b7474705506 100644 +--- a/systemd/ceph-radosgw@.service.in ++++ b/systemd/ceph-radosgw@.service.in +@@ -16,7 +16,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-rbd-mirror@.service.in b/systemd/ceph-rbd-mirror@.service.in +index fe49f11116e..1057892dc99 100644 +--- a/systemd/ceph-rbd-mirror@.service.in ++++ b/systemd/ceph-rbd-mirror@.service.in +@@ -16,7 +16,6 @@ 
MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/cephfs-mirror@.service.in b/systemd/cephfs-mirror@.service.in +index a97d6ad8b57..bed9d195302 100644 +--- a/systemd/cephfs-mirror@.service.in ++++ b/systemd/cephfs-mirror@.service.in +@@ -15,7 +15,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +@@ -30,4 +29,4 @@ StartLimitInterval=30min + TasksMax=infinity + + [Install] +-WantedBy=cephfs-mirror.target +\ No newline at end of file ++WantedBy=cephfs-mirror.target +-- +2.30.2 + diff -Nru ceph-16.2.0/src/test/ubuntu-16.04/debian/patches/series ceph-16.2.1/src/test/ubuntu-16.04/debian/patches/series --- ceph-16.2.0/src/test/ubuntu-16.04/debian/patches/series 2021-04-10 07:12:54.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-16.04/debian/patches/series 2021-05-04 18:20:11.000000000 +0000 @@ -14,3 +14,5 @@ # text relocation fix AARCH64 bug1917414.patch bug1914584.patch +# systemd permissions +bug1925347.patch diff -Nru ceph-16.2.0/src/test/ubuntu-16.04/debian/rules ceph-16.2.1/src/test/ubuntu-16.04/debian/rules --- ceph-16.2.0/src/test/ubuntu-16.04/debian/rules 2021-04-10 07:12:54.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-16.04/debian/rules 2021-05-04 18:20:11.000000000 +0000 @@ -125,8 +125,8 @@ # from the package install - package ships unversioned modules. 
rm -f $(CURDIR)/debian/tmp/usr/lib/*/ceph/erasure-code/libec_*.so.* find $(CURDIR)/debian/tmp/usr/lib/*/ceph/erasure-code -type l -delete || : - # running out of disk space on riscv64 - rm -rf $(CURDIR)/obj-riscv64-linux-gnu + # avoid running out of disk space + rm -rf $(CURDIR)/obj-*-linux-gnu # doc/changelog is a directory, which confuses dh_installchangelogs diff -Nru ceph-16.2.0/src/test/ubuntu-18.04/debian/ceph-base.install ceph-16.2.1/src/test/ubuntu-18.04/debian/ceph-base.install --- ceph-16.2.0/src/test/ubuntu-18.04/debian/ceph-base.install 2021-03-26 09:38:35.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-18.04/debian/ceph-base.install 2021-05-04 18:20:11.000000000 +0000 @@ -12,7 +12,6 @@ usr/sbin/ceph-create-keys usr/share/doc/ceph/sample.ceph.conf usr/share/man/man8/ceph-create-keys.8 -usr/share/man/man8/ceph-deploy.8 usr/share/man/man8/ceph-kvstore-tool.8 usr/share/man/man8/ceph-run.8 usr/share/man/man8/crushtool.8 diff -Nru ceph-16.2.0/src/test/ubuntu-18.04/debian/changelog ceph-16.2.1/src/test/ubuntu-18.04/debian/changelog --- ceph-16.2.0/src/test/ubuntu-18.04/debian/changelog 2021-04-10 07:14:20.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-18.04/debian/changelog 2021-05-04 18:21:24.000000000 +0000 @@ -1,3 +1,20 @@ +ceph (16.2.1-0ubuntu1) impish; urgency=medium + + [ Chris MacNaughton ] + * d/ceph-base.install: Remove ceph-deploy man page installation + (LP: #1892448). + + [ James Page ] + * SECURITY UPDATE: New upstream point release (LP: #1925322): + - CVE-2021-20288 + * d/rules: remove temporary build objects after install to avoid + running out of disk space during package builds. + * d/p/bug1925347.patch: Cherry pick fix to revert ProtectClock + permissions change in systemd configurations which prevents the + ceph-osd process from starting (LP: #1925347). 
+ + -- James Page Tue, 04 May 2021 19:21:24 +0100 + ceph (16.2.0-0ubuntu1) hirsute; urgency=medium [ Chris MacNaughton ] diff -Nru ceph-16.2.0/src/test/ubuntu-18.04/debian/gbp.conf ceph-16.2.1/src/test/ubuntu-18.04/debian/gbp.conf --- ceph-16.2.0/src/test/ubuntu-18.04/debian/gbp.conf 2021-04-10 07:12:54.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-18.04/debian/gbp.conf 2021-05-04 18:21:08.000000000 +0000 @@ -1,3 +1,3 @@ [DEFAULT] -debian-branch = ubuntu/hirsute +debian-branch = ubuntu/impish pristine-tar = True diff -Nru ceph-16.2.0/src/test/ubuntu-18.04/debian/patches/bug1925347.patch ceph-16.2.1/src/test/ubuntu-18.04/debian/patches/bug1925347.patch --- ceph-16.2.0/src/test/ubuntu-18.04/debian/patches/bug1925347.patch 1970-01-01 00:00:00.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-18.04/debian/patches/bug1925347.patch 2021-05-04 18:20:11.000000000 +0000 @@ -0,0 +1,154 @@ +From 833a53cfdfa30559d7608e03d6da432260e286a8 Mon Sep 17 00:00:00 2001 +From: Wong Hoi Sing Edison +Date: Wed, 14 Apr 2021 15:36:17 +0800 +Subject: [PATCH] systemd: remove `ProtectClock=true` for `ceph-osd@.service` + +Ceph 16.2.0 Pacific by https://github.com/ceph/ceph/commit/9a84d5a introduce following new systemd restriction: + + ProtectClock=true + ProtectHostname=true + ProtectKernelLogs=true + RestrictSUIDSGID=true + +BTW, `ceph-osd@.service` failed with `ProtectClock=true` unexpectly, also see: + + - + - + +This PR intruduce: + + - Remove `ProtectClock=true` for our systemd service templates + +Fixes: https://tracker.ceph.com/issues/50347 +Signed-off-by: Wong Hoi Sing Edison +(cherry picked from commit 85bc551b179d940a50cbdfd0c20848e3187c70a6) +--- + systemd/ceph-fuse@.service.in | 1 - + systemd/ceph-immutable-object-cache@.service.in | 1 - + systemd/ceph-mds@.service.in | 1 - + systemd/ceph-mgr@.service.in | 1 - + systemd/ceph-mon@.service.in | 1 - + systemd/ceph-osd@.service.in | 1 - + systemd/ceph-radosgw@.service.in | 1 - + systemd/ceph-rbd-mirror@.service.in | 1 - + 
systemd/cephfs-mirror@.service.in | 3 +-- + 9 files changed, 1 insertion(+), 10 deletions(-) + +diff --git a/systemd/ceph-fuse@.service.in b/systemd/ceph-fuse@.service.in +index 1ea4b17675a..9c12c9ba444 100644 +--- a/systemd/ceph-fuse@.service.in ++++ b/systemd/ceph-fuse@.service.in +@@ -14,7 +14,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + # ceph-fuse requires access to /dev fuse device + PrivateDevices=no +-ProtectClock=true + ProtectControlGroups=true + ProtectHostname=true + ProtectKernelLogs=true +diff --git a/systemd/ceph-immutable-object-cache@.service.in b/systemd/ceph-immutable-object-cache@.service.in +index f5782487f9e..62ff8dbd272 100644 +--- a/systemd/ceph-immutable-object-cache@.service.in ++++ b/systemd/ceph-immutable-object-cache@.service.in +@@ -14,7 +14,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-mds@.service.in b/systemd/ceph-mds@.service.in +index 2884f587f97..afa36702f9c 100644 +--- a/systemd/ceph-mds@.service.in ++++ b/systemd/ceph-mds@.service.in +@@ -17,7 +17,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-mgr@.service.in b/systemd/ceph-mgr@.service.in +index 1ee28285209..8fadc4746b3 100644 +--- a/systemd/ceph-mgr@.service.in ++++ b/systemd/ceph-mgr@.service.in +@@ -16,7 +16,6 @@ LockPersonality=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-mon@.service.in b/systemd/ceph-mon@.service.in +index 994cdfd2869..b7c92f278e3 100644 +--- a/systemd/ceph-mon@.service.in ++++ b/systemd/ceph-mon@.service.in +@@ -22,7 +22,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=false + 
PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-osd@.service.in b/systemd/ceph-osd@.service.in +index 4981417d620..046500efb66 100644 +--- a/systemd/ceph-osd@.service.in ++++ b/systemd/ceph-osd@.service.in +@@ -18,7 +18,6 @@ MemoryDenyWriteExecute=true + # Need NewPrivileges via `sudo smartctl` + NoNewPrivileges=false + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-radosgw@.service.in b/systemd/ceph-radosgw@.service.in +index cfff60c18b8..b7474705506 100644 +--- a/systemd/ceph-radosgw@.service.in ++++ b/systemd/ceph-radosgw@.service.in +@@ -16,7 +16,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-rbd-mirror@.service.in b/systemd/ceph-rbd-mirror@.service.in +index fe49f11116e..1057892dc99 100644 +--- a/systemd/ceph-rbd-mirror@.service.in ++++ b/systemd/ceph-rbd-mirror@.service.in +@@ -16,7 +16,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/cephfs-mirror@.service.in b/systemd/cephfs-mirror@.service.in +index a97d6ad8b57..bed9d195302 100644 +--- a/systemd/cephfs-mirror@.service.in ++++ b/systemd/cephfs-mirror@.service.in +@@ -15,7 +15,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +@@ -30,4 +29,4 @@ StartLimitInterval=30min + TasksMax=infinity + + [Install] +-WantedBy=cephfs-mirror.target +\ No newline at end of file ++WantedBy=cephfs-mirror.target +-- +2.30.2 + diff -Nru 
ceph-16.2.0/src/test/ubuntu-18.04/debian/patches/series ceph-16.2.1/src/test/ubuntu-18.04/debian/patches/series --- ceph-16.2.0/src/test/ubuntu-18.04/debian/patches/series 2021-04-10 07:12:54.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-18.04/debian/patches/series 2021-05-04 18:20:11.000000000 +0000 @@ -14,3 +14,5 @@ # text relocation fix AARCH64 bug1917414.patch bug1914584.patch +# systemd permissions +bug1925347.patch diff -Nru ceph-16.2.0/src/test/ubuntu-18.04/debian/rules ceph-16.2.1/src/test/ubuntu-18.04/debian/rules --- ceph-16.2.0/src/test/ubuntu-18.04/debian/rules 2021-04-10 07:12:54.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-18.04/debian/rules 2021-05-04 18:20:11.000000000 +0000 @@ -125,8 +125,8 @@ # from the package install - package ships unversioned modules. rm -f $(CURDIR)/debian/tmp/usr/lib/*/ceph/erasure-code/libec_*.so.* find $(CURDIR)/debian/tmp/usr/lib/*/ceph/erasure-code -type l -delete || : - # running out of disk space on riscv64 - rm -rf $(CURDIR)/obj-riscv64-linux-gnu + # avoid running out of disk space + rm -rf $(CURDIR)/obj-*-linux-gnu # doc/changelog is a directory, which confuses dh_installchangelogs diff -Nru ceph-16.2.0/src/test/ubuntu-20.04/debian/ceph-base.install ceph-16.2.1/src/test/ubuntu-20.04/debian/ceph-base.install --- ceph-16.2.0/src/test/ubuntu-20.04/debian/ceph-base.install 2021-03-26 09:38:35.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-20.04/debian/ceph-base.install 2021-05-04 18:20:11.000000000 +0000 @@ -12,7 +12,6 @@ usr/sbin/ceph-create-keys usr/share/doc/ceph/sample.ceph.conf usr/share/man/man8/ceph-create-keys.8 -usr/share/man/man8/ceph-deploy.8 usr/share/man/man8/ceph-kvstore-tool.8 usr/share/man/man8/ceph-run.8 usr/share/man/man8/crushtool.8 diff -Nru ceph-16.2.0/src/test/ubuntu-20.04/debian/changelog ceph-16.2.1/src/test/ubuntu-20.04/debian/changelog --- ceph-16.2.0/src/test/ubuntu-20.04/debian/changelog 2021-04-10 07:14:20.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-20.04/debian/changelog 2021-05-04 
18:21:24.000000000 +0000 @@ -1,3 +1,20 @@ +ceph (16.2.1-0ubuntu1) impish; urgency=medium + + [ Chris MacNaughton ] + * d/ceph-base.install: Remove ceph-deploy man page installation + (LP: #1892448). + + [ James Page ] + * SECURITY UPDATE: New upstream point release (LP: #1925322): + - CVE-2021-20288 + * d/rules: remove temporary build objects after install to avoid + running out of disk space during package builds. + * d/p/bug1925347.patch: Cherry pick fix to revert ProtectClock + permissions change in systemd configurations which prevents the + ceph-osd process from starting (LP: #1925347). + + -- James Page Tue, 04 May 2021 19:21:24 +0100 + ceph (16.2.0-0ubuntu1) hirsute; urgency=medium [ Chris MacNaughton ] diff -Nru ceph-16.2.0/src/test/ubuntu-20.04/debian/gbp.conf ceph-16.2.1/src/test/ubuntu-20.04/debian/gbp.conf --- ceph-16.2.0/src/test/ubuntu-20.04/debian/gbp.conf 2021-04-10 07:12:54.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-20.04/debian/gbp.conf 2021-05-04 18:21:08.000000000 +0000 @@ -1,3 +1,3 @@ [DEFAULT] -debian-branch = ubuntu/hirsute +debian-branch = ubuntu/impish pristine-tar = True diff -Nru ceph-16.2.0/src/test/ubuntu-20.04/debian/patches/bug1925347.patch ceph-16.2.1/src/test/ubuntu-20.04/debian/patches/bug1925347.patch --- ceph-16.2.0/src/test/ubuntu-20.04/debian/patches/bug1925347.patch 1970-01-01 00:00:00.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-20.04/debian/patches/bug1925347.patch 2021-05-04 18:20:11.000000000 +0000 @@ -0,0 +1,154 @@ +From 833a53cfdfa30559d7608e03d6da432260e286a8 Mon Sep 17 00:00:00 2001 +From: Wong Hoi Sing Edison +Date: Wed, 14 Apr 2021 15:36:17 +0800 +Subject: [PATCH] systemd: remove `ProtectClock=true` for `ceph-osd@.service` + +Ceph 16.2.0 Pacific by https://github.com/ceph/ceph/commit/9a84d5a introduce following new systemd restriction: + + ProtectClock=true + ProtectHostname=true + ProtectKernelLogs=true + RestrictSUIDSGID=true + +BTW, `ceph-osd@.service` failed with `ProtectClock=true` unexpectly, also see: 
+ + - + - + +This PR intruduce: + + - Remove `ProtectClock=true` for our systemd service templates + +Fixes: https://tracker.ceph.com/issues/50347 +Signed-off-by: Wong Hoi Sing Edison +(cherry picked from commit 85bc551b179d940a50cbdfd0c20848e3187c70a6) +--- + systemd/ceph-fuse@.service.in | 1 - + systemd/ceph-immutable-object-cache@.service.in | 1 - + systemd/ceph-mds@.service.in | 1 - + systemd/ceph-mgr@.service.in | 1 - + systemd/ceph-mon@.service.in | 1 - + systemd/ceph-osd@.service.in | 1 - + systemd/ceph-radosgw@.service.in | 1 - + systemd/ceph-rbd-mirror@.service.in | 1 - + systemd/cephfs-mirror@.service.in | 3 +-- + 9 files changed, 1 insertion(+), 10 deletions(-) + +diff --git a/systemd/ceph-fuse@.service.in b/systemd/ceph-fuse@.service.in +index 1ea4b17675a..9c12c9ba444 100644 +--- a/systemd/ceph-fuse@.service.in ++++ b/systemd/ceph-fuse@.service.in +@@ -14,7 +14,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + # ceph-fuse requires access to /dev fuse device + PrivateDevices=no +-ProtectClock=true + ProtectControlGroups=true + ProtectHostname=true + ProtectKernelLogs=true +diff --git a/systemd/ceph-immutable-object-cache@.service.in b/systemd/ceph-immutable-object-cache@.service.in +index f5782487f9e..62ff8dbd272 100644 +--- a/systemd/ceph-immutable-object-cache@.service.in ++++ b/systemd/ceph-immutable-object-cache@.service.in +@@ -14,7 +14,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-mds@.service.in b/systemd/ceph-mds@.service.in +index 2884f587f97..afa36702f9c 100644 +--- a/systemd/ceph-mds@.service.in ++++ b/systemd/ceph-mds@.service.in +@@ -17,7 +17,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-mgr@.service.in 
b/systemd/ceph-mgr@.service.in +index 1ee28285209..8fadc4746b3 100644 +--- a/systemd/ceph-mgr@.service.in ++++ b/systemd/ceph-mgr@.service.in +@@ -16,7 +16,6 @@ LockPersonality=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-mon@.service.in b/systemd/ceph-mon@.service.in +index 994cdfd2869..b7c92f278e3 100644 +--- a/systemd/ceph-mon@.service.in ++++ b/systemd/ceph-mon@.service.in +@@ -22,7 +22,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=false + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-osd@.service.in b/systemd/ceph-osd@.service.in +index 4981417d620..046500efb66 100644 +--- a/systemd/ceph-osd@.service.in ++++ b/systemd/ceph-osd@.service.in +@@ -18,7 +18,6 @@ MemoryDenyWriteExecute=true + # Need NewPrivileges via `sudo smartctl` + NoNewPrivileges=false + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-radosgw@.service.in b/systemd/ceph-radosgw@.service.in +index cfff60c18b8..b7474705506 100644 +--- a/systemd/ceph-radosgw@.service.in ++++ b/systemd/ceph-radosgw@.service.in +@@ -16,7 +16,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/ceph-rbd-mirror@.service.in b/systemd/ceph-rbd-mirror@.service.in +index fe49f11116e..1057892dc99 100644 +--- a/systemd/ceph-rbd-mirror@.service.in ++++ b/systemd/ceph-rbd-mirror@.service.in +@@ -16,7 +16,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +diff --git a/systemd/cephfs-mirror@.service.in 
b/systemd/cephfs-mirror@.service.in +index a97d6ad8b57..bed9d195302 100644 +--- a/systemd/cephfs-mirror@.service.in ++++ b/systemd/cephfs-mirror@.service.in +@@ -15,7 +15,6 @@ MemoryDenyWriteExecute=true + NoNewPrivileges=true + PrivateDevices=yes + PrivateTmp=true +-ProtectClock=true + ProtectControlGroups=true + ProtectHome=true + ProtectHostname=true +@@ -30,4 +29,4 @@ StartLimitInterval=30min + TasksMax=infinity + + [Install] +-WantedBy=cephfs-mirror.target +\ No newline at end of file ++WantedBy=cephfs-mirror.target +-- +2.30.2 + diff -Nru ceph-16.2.0/src/test/ubuntu-20.04/debian/patches/series ceph-16.2.1/src/test/ubuntu-20.04/debian/patches/series --- ceph-16.2.0/src/test/ubuntu-20.04/debian/patches/series 2021-04-10 07:12:54.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-20.04/debian/patches/series 2021-05-04 18:20:11.000000000 +0000 @@ -14,3 +14,5 @@ # text relocation fix AARCH64 bug1917414.patch bug1914584.patch +# systemd permissions +bug1925347.patch diff -Nru ceph-16.2.0/src/test/ubuntu-20.04/debian/rules ceph-16.2.1/src/test/ubuntu-20.04/debian/rules --- ceph-16.2.0/src/test/ubuntu-20.04/debian/rules 2021-04-10 07:12:54.000000000 +0000 +++ ceph-16.2.1/src/test/ubuntu-20.04/debian/rules 2021-05-04 18:20:11.000000000 +0000 @@ -125,8 +125,8 @@ # from the package install - package ships unversioned modules. rm -f $(CURDIR)/debian/tmp/usr/lib/*/ceph/erasure-code/libec_*.so.* find $(CURDIR)/debian/tmp/usr/lib/*/ceph/erasure-code -type l -delete || : - # running out of disk space on riscv64 - rm -rf $(CURDIR)/obj-riscv64-linux-gnu + # avoid running out of disk space + rm -rf $(CURDIR)/obj-*-linux-gnu # doc/changelog is a directory, which confuses dh_installchangelogs