diff -Nru rust-parking-lot-0.7.1/appveyor.yml rust-parking-lot-0.10.0/appveyor.yml --- rust-parking-lot-0.7.1/appveyor.yml 2019-01-01 14:02:15.000000000 +0000 +++ rust-parking-lot-0.10.0/appveyor.yml 2019-11-09 11:27:25.000000000 +0000 @@ -2,28 +2,58 @@ TRAVIS_CARGO_NIGHTLY_FEATURE: nightly RUST_TEST_THREADS: 1 matrix: - - TARGET: nightly-x86_64-pc-windows-msvc - - TARGET: nightly-i686-pc-windows-msvc - - TARGET: nightly-x86_64-pc-windows-gnu - - TARGET: nightly-i686-pc-windows-gnu - - TARGET: 1.24.0-x86_64-pc-windows-msvc - - TARGET: 1.24.0-i686-pc-windows-msvc - - TARGET: 1.24.0-x86_64-pc-windows-gnu - - TARGET: 1.24.0-i686-pc-windows-gnu + - TARGET: x86_64-pc-windows-msvc + MSYSTEM: MINGW64 + CPU: x86_64 + TOOLCHAIN: nightly + FEATURES: nightly + - TARGET: i686-pc-windows-msvc + MSYSTEM: MINGW32 + CPU: i686 + TOOLCHAIN: nightly + FEATURES: nightly + - TARGET: x86_64-pc-windows-gnu + MSYSTEM: MINGW64 + CPU: x86_64 + TOOLCHAIN: nightly + FEATURES: nightly + - TARGET: i686-pc-windows-gnu + MSYSTEM: MINGW32 + CPU: i686 + TOOLCHAIN: nightly + FEATURES: nightly + - TARGET: x86_64-pc-windows-msvc + MSYSTEM: MINGW64 + CPU: x86_64 + TOOLCHAIN: 1.36.0 + - TARGET: i686-pc-windows-msvc + MSYSTEM: MINGW32 + CPU: i686 + TOOLCHAIN: 1.36.0 + - TARGET: x86_64-pc-windows-gnu + MSYSTEM: MINGW64 + CPU: x86_64 + TOOLCHAIN: 1.36.0 + - TARGET: i686-pc-windows-gnu + MSYSTEM: MINGW32 + CPU: i686 + TOOLCHAIN: 1.36.0 install: - - SET PATH=C:\Python27;C:\Python27\Scripts;%PATH%;%APPDATA%\Python\Scripts - - pip install "travis-cargo<0.2" --user - - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-${env:TARGET}.exe" -FileName "rust-install.exe" - - ps: .\rust-install.exe /VERYSILENT /NORESTART /DIR="C:\rust" | Out-Null - - ps: $env:PATH="$env:PATH;C:\rust\bin" + - set PATH=C:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH% + - pacman --noconfirm -Syu mingw-w64-%CPU%-make + - appveyor-retry appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe + - rustup-init.exe -y --default-host %TARGET% --default-toolchain %TOOLCHAIN% + - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin - rustc -vV - cargo -vV build_script: - - travis-cargo build + - cargo build --features "%FEATURES%" test_script: - - travis-cargo test - - travis-cargo --only nightly test -- --features=deadlock_detection - - travis-cargo doc + - cargo test --all --features "%FEATURES%" + - cargo doc --all + - cd benchmark + - cargo run --release --bin mutex -- 2 1 0 1 2 + - cargo run --release --bin rwlock -- 1 1 1 0 1 2 diff -Nru rust-parking-lot-0.7.1/Cargo.toml rust-parking-lot-0.10.0/Cargo.toml --- rust-parking-lot-0.7.1/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ rust-parking-lot-0.10.0/Cargo.toml 2019-11-25 21:16:48.000000000 +0000 @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -11,8 +11,9 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "parking_lot" -version = "0.7.1" +version = "0.10.0" authors = ["Amanieu d'Antras "] description = "More compact and efficient implementations of the standard synchronization primitives." 
readme = "README.md" @@ -21,15 +22,22 @@ license = "Apache-2.0/MIT" repository = "https://github.com/Amanieu/parking_lot" [dependencies.lock_api] -version = "0.1" +version = "0.3.1" [dependencies.parking_lot_core] -version = "0.4" +version = "0.7.0" +[dev-dependencies.bincode] +version = "1.1.3" + +[dev-dependencies.lazy_static] +version = "1.0" + [dev-dependencies.rand] -version = "0.6" +version = "0.7" [features] deadlock_detection = ["parking_lot_core/deadlock_detection"] -default = ["owning_ref"] +default = [] nightly = ["parking_lot_core/nightly", "lock_api/nightly"] owning_ref = ["lock_api/owning_ref"] +serde = ["lock_api/serde"] diff -Nru rust-parking-lot-0.7.1/Cargo.toml.orig rust-parking-lot-0.10.0/Cargo.toml.orig --- rust-parking-lot-0.7.1/Cargo.toml.orig 2019-01-01 14:21:51.000000000 +0000 +++ rust-parking-lot-0.10.0/Cargo.toml.orig 2019-11-25 21:09:35.000000000 +0000 @@ -1,6 +1,6 @@ [package] name = "parking_lot" -version = "0.7.1" +version = "0.10.0" authors = ["Amanieu d'Antras "] description = "More compact and efficient implementations of the standard synchronization primitives." license = "Apache-2.0/MIT" @@ -8,19 +8,25 @@ readme = "README.md" keywords = ["mutex", "condvar", "rwlock", "once", "thread"] categories = ["concurrency"] +edition = "2018" [dependencies] -parking_lot_core = { path = "core", version = "0.4" } -lock_api = { path = "lock_api", version = "0.1" } +parking_lot_core = { path = "core", version = "0.7.0" } +lock_api = { path = "lock_api", version = "0.3.1" } [dev-dependencies] -rand = "0.6" +rand = "0.7" +lazy_static = "1.0" + +# Used when testing out serde support. +bincode = {version = "1.1.3"} [features] -default = ["owning_ref"] +default = [] owning_ref = ["lock_api/owning_ref"] nightly = ["parking_lot_core/nightly", "lock_api/nightly"] deadlock_detection = ["parking_lot_core/deadlock_detection"] +serde = ["lock_api/serde"] [workspace] exclude = ["benchmark"] diff -Nru rust-parking-lot-0.7.1/.cargo_vcs_info.json rust-parking-lot-0.10.0/.cargo_vcs_info.json --- rust-parking-lot-0.7.1/.cargo_vcs_info.json 1970-01-01 00:00:00.000000000 +0000 +++ rust-parking-lot-0.10.0/.cargo_vcs_info.json 2019-11-25 21:16:48.000000000 +0000 @@ -1,5 +1,5 @@ { "git": { - "sha1": "407aa52807bc96fd1d6d3bbfd60298b664a77880" + "sha1": "bbbb57633f74ae27fc23cf4b6aee24e9d3449a10" } } diff -Nru rust-parking-lot-0.7.1/CHANGELOG.md rust-parking-lot-0.10.0/CHANGELOG.md --- rust-parking-lot-0.7.1/CHANGELOG.md 2019-01-01 14:19:44.000000000 +0000 +++ rust-parking-lot-0.10.0/CHANGELOG.md 2019-11-25 21:09:35.000000000 +0000 @@ -1,26 +1,72 @@ -0.7.1 (2019-01-01) -================== +## parking_lot 0.10.0, parking_lot_core 0.7.0, lock_api 0.3.2 (2019-11-25) + +- Upgrade smallvec dependency to 1.0 in parking_lot_core. +- Replace all usage of `mem::uninitialized` with `mem::MaybeUninit`. +- The minimum required Rust version is bumped to 1.36 because of the above two changes. +- Make methods on `WaitTimeoutResult` and `OnceState` take `self` by value instead of reference. + +## parking_lot_core 0.6.2 (2019-07-22) + +- Fixed compile error on Windows with old cfg_if version. (#164) + +## parking_lot_core 0.6.1 (2019-07-17) + +- Fixed Android build. (#163) + +## parking_lot 0.9.0, parking_lot_core 0.6.0, lock_api 0.3.1 (2019-07-14) + +- Re-export lock_api (0.3.1) from parking_lot (#150) +- Removed (non-dev) dependency on rand crate for fairness mechanism, by + including a simple xorshift PRNG in core (#144) +- Android now uses the futex-based ThreadParker. (#140) +- Fixed CloudABI ThreadParker.
(#140) +- Fix race condition in lock_api::ReentrantMutex (da16c2c7) + +## lock_api 0.3.0 (2019-07-03, _yanked_) + +- Use NonZeroUsize in GetThreadId::nonzero_thread_id (#148) +- Debug assert lock_count in ReentrantMutex (#148) +- Tag as `unsafe` and document some internal methods (#148) +- This release was _yanked_ due to a regression in ReentrantMutex (da16c2c7) + +## parking_lot 0.8.1 (2019-07-03, _yanked_) + +- Re-export lock_api (0.3.0) from parking_lot (#150) +- This release was _yanked_ from crates.io due to unexpected breakage (#156) + +## parking_lot 0.8.0, parking_lot_core 0.5.0, lock_api 0.2.0 (2019-05-04) + +- Fix race conditions in deadlock detection. +- Support for more platforms by adding ThreadParker implementations for + Wasm, Redox, SGX and CloudABI. +- Drop support for older Rust. parking_lot now requires 1.31 and is a + Rust 2018 edition crate (#122). +- Disable the owning_ref feature by default. +- Fix was_last_thread value in the timeout callback of park() (#129). +- Support single byte Mutex/Once on stable Rust when compiler is at least + version 1.34. +- Make Condvar::new and Once::new const fns on stable Rust and remove + ONCE_INIT (#134). +- Add optional Serde support (#135). + +## parking_lot 0.7.1 (2019-01-01) - Fixed potential deadlock when upgrading a RwLock. - Fixed overflow panic on very long timeouts (#111). -0.7.0 (2018-11-20) -================== +## parking_lot 0.7.0, parking_lot_core 0.4.0 (2018-11-26) - Return if or how many threads were notified from `Condvar::notify_*` -0.6.3 (2018-07-18) -================== +## parking_lot 0.6.3 (2018-07-18) - Export `RawMutex`, `RawRwLock` and `RawThreadId`. -0.6.2 (2018-06-18) -================== +## parking_lot 0.6.2 (2018-06-18) - Enable `lock_api/nightly` feature from `parking_lot/nightly` (#79) -0.6.1 (2018-06-08) -================== +## parking_lot 0.6.1 (2018-06-08) Added missing typedefs for mapped lock guards: @@ -29,16 +75,15 @@ - `MappedRwLockReadGuard` - `MappedRwLockWriteGuard` -0.6.0 (2018-06-08) -================== +## parking_lot 0.6.0 (2018-06-08) This release moves most of the code for type-safe `Mutex` and `RwLock` types into a separate crate called `lock_api`. This new crate is compatible with -`no_std` and provides `Mutex` and `RwLock` type-safe wrapper types from a -raw mutex type which implements the `RawMutex` or `RawRwLock` trait. The API -provided by the wrapper types can be extended by implementing more traits on the -raw mutex type which provide more functionality (e.g. `RawMutexTimed`). See the -crate documentation for more details. +`no_std` and provides `Mutex` and `RwLock` type-safe wrapper types from a raw +mutex type which implements the `RawMutex` or `RawRwLock` trait. The API +provided by the wrapper types can be extended by implementing more traits on +the raw mutex type which provide more functionality (e.g. `RawMutexTimed`). See +the crate documentation for more details. 
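(Editor's note: to make the lock_api split described above concrete, here is a minimal sketch of a custom lock under the lock_api 0.3 API, following the pattern from the lock_api documentation. `RawSpinlock` and the spin loop are illustrative and not part of parking_lot.)

```rust
use lock_api::{GuardSend, RawMutex};
use std::sync::atomic::{spin_loop_hint, AtomicBool, Ordering};

// A trivial raw lock; implementing `RawMutex` for it is all that is needed
// to get the type-safe wrapper types.
pub struct RawSpinlock(AtomicBool);

unsafe impl RawMutex for RawSpinlock {
    const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false));
    // Guards of this lock may be sent to other threads.
    type GuardMarker = GuardSend;

    fn lock(&self) {
        while !self.try_lock() {
            spin_loop_hint();
        }
    }

    fn try_lock(&self) -> bool {
        self.0
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    fn unlock(&self) {
        self.0.store(false, Ordering::Release);
    }
}

// lock_api then provides the data-carrying wrappers for free.
pub type Spinlock<T> = lock_api::Mutex<RawSpinlock, T>;
pub type SpinlockGuard<'a, T> = lock_api::MutexGuard<'a, RawSpinlock, T>;
```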
There are also several major changes: diff -Nru rust-parking-lot-0.7.1/debian/cargo-checksum.json rust-parking-lot-0.10.0/debian/cargo-checksum.json --- rust-parking-lot-0.7.1/debian/cargo-checksum.json 2019-01-22 05:21:27.000000000 +0000 +++ rust-parking-lot-0.10.0/debian/cargo-checksum.json 2020-01-08 22:40:18.000000000 +0000 @@ -1 +1 @@ -{"package":"ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337","files":{}} +{"package":"92e98c49ab0b7ce5b222f2cc9193fc4efe11c6d0bd4f648e374684a6857b1cfc","files":{}} diff -Nru rust-parking-lot-0.7.1/debian/changelog rust-parking-lot-0.10.0/debian/changelog --- rust-parking-lot-0.7.1/debian/changelog 2019-01-22 05:21:27.000000000 +0000 +++ rust-parking-lot-0.10.0/debian/changelog 2020-01-08 22:40:18.000000000 +0000 @@ -1,3 +1,17 @@ +rust-parking-lot (0.10.0-1) unstable; urgency=medium + + * Team upload. + * Package parking_lot 0.10.0 from crates.io using debcargo 2.4.2 + + -- Ximin Luo Wed, 08 Jan 2020 22:40:18 +0000 + +rust-parking-lot (0.9.0-1) unstable; urgency=medium + + * Team upload. + * Package parking_lot 0.9.0 from crates.io using debcargo 2.4.0 + + -- Ximin Luo Wed, 04 Sep 2019 23:27:36 -0700 + rust-parking-lot (0.7.1-1) unstable; urgency=medium * Package parking_lot 0.7.1 from crates.io using debcargo 2.2.9 diff -Nru rust-parking-lot-0.7.1/debian/control rust-parking-lot-0.10.0/debian/control --- rust-parking-lot-0.7.1/debian/control 2019-01-22 05:21:27.000000000 +0000 +++ rust-parking-lot-0.10.0/debian/control 2020-01-08 22:40:18.000000000 +0000 @@ -2,18 +2,17 @@ Section: rust Priority: optional Build-Depends: debhelper (>= 11), - dh-cargo (>= 10), + dh-cargo (>= 18), cargo:native , rustc:native , libstd-rust-dev , - librust-lock-api-0.1+default-dev , - librust-lock-api-0.1+owning-ref-dev , - librust-parking-lot-core-0.4+default-dev + librust-lock-api-0.3+default-dev (>= 0.3.1-~~) , + librust-parking-lot-core-0.7+default-dev Maintainer: Debian Rust Maintainers Uploaders: Sylvestre Ledru , Wolfgang Silbermayr -Standards-Version: 4.2.0 +Standards-Version: 4.4.1 Vcs-Git: https://salsa.debian.org/rust-team/debcargo-conf.git [src/parking-lot] Vcs-Browser: https://salsa.debian.org/rust-team/debcargo-conf/tree/master/src/parking-lot X-Cargo-Crate: parking_lot @@ -23,17 +22,21 @@ Multi-Arch: same Depends: ${misc:Depends}, - librust-lock-api-0.1+default-dev, - librust-parking-lot-core-0.4+default-dev -Recommends: - librust-parking-lot+owning-ref-dev (= ${binary:Version}) + librust-lock-api-0.3+default-dev (>= 0.3.1-~~), + librust-parking-lot-core-0.7+default-dev Suggests: librust-parking-lot+deadlock-detection-dev (= ${binary:Version}), - librust-parking-lot+nightly-dev (= ${binary:Version}) + librust-parking-lot+nightly-dev (= ${binary:Version}), + librust-parking-lot+owning-ref-dev (= ${binary:Version}), + librust-parking-lot+serde-dev (= ${binary:Version}) Provides: + librust-parking-lot+default-dev (= ${binary:Version}), librust-parking-lot-0-dev (= ${binary:Version}), - librust-parking-lot-0.7-dev (= ${binary:Version}), - librust-parking-lot-0.7.1-dev (= ${binary:Version}) + librust-parking-lot-0+default-dev (= ${binary:Version}), + librust-parking-lot-0.10-dev (= ${binary:Version}), + librust-parking-lot-0.10+default-dev (= ${binary:Version}), + librust-parking-lot-0.10.0-dev (= ${binary:Version}), + librust-parking-lot-0.10.0+default-dev (= ${binary:Version}) Description: Compact, efficient std sync primitives - Rust source code This package contains the source for the Rust parking_lot crate, packaged by debcargo for use with 
cargo and dh-cargo. @@ -44,13 +47,13 @@ Depends: ${misc:Depends}, librust-parking-lot-dev (= ${binary:Version}), - librust-parking-lot-core-0.4+deadlock-detection-dev + librust-parking-lot-core-0.7+deadlock-detection-dev Provides: librust-parking-lot-0+deadlock-detection-dev (= ${binary:Version}), - librust-parking-lot-0.7+deadlock-detection-dev (= ${binary:Version}), - librust-parking-lot-0.7.1+deadlock-detection-dev (= ${binary:Version}) + librust-parking-lot-0.10+deadlock-detection-dev (= ${binary:Version}), + librust-parking-lot-0.10.0+deadlock-detection-dev (= ${binary:Version}) Description: Compact, efficient std sync primitives - feature "deadlock_detection" - This metapackage enables feature deadlock_detection for the Rust parking_lot + This metapackage enables feature "deadlock_detection" for the Rust parking_lot crate, by pulling in any additional dependencies needed by that feature. Package: librust-parking-lot+nightly-dev @@ -59,14 +62,14 @@ Depends: ${misc:Depends}, librust-parking-lot-dev (= ${binary:Version}), - librust-lock-api-0.1+nightly-dev, - librust-parking-lot-core-0.4+nightly-dev + librust-lock-api-0.3+nightly-dev (>= 0.3.1-~~), + librust-parking-lot-core-0.7+nightly-dev Provides: librust-parking-lot-0+nightly-dev (= ${binary:Version}), - librust-parking-lot-0.7+nightly-dev (= ${binary:Version}), - librust-parking-lot-0.7.1+nightly-dev (= ${binary:Version}) + librust-parking-lot-0.10+nightly-dev (= ${binary:Version}), + librust-parking-lot-0.10.0+nightly-dev (= ${binary:Version}) Description: Compact, efficient std sync primitives - feature "nightly" - This metapackage enables feature nightly for the Rust parking_lot crate, by + This metapackage enables feature "nightly" for the Rust parking_lot crate, by pulling in any additional dependencies needed by that feature. Package: librust-parking-lot+owning-ref-dev @@ -75,15 +78,26 @@ Depends: ${misc:Depends}, librust-parking-lot-dev (= ${binary:Version}), - librust-lock-api-0.1+owning-ref-dev + librust-lock-api-0.3+owning-ref-dev (>= 0.3.1-~~) Provides: - librust-parking-lot+default-dev (= ${binary:Version}), librust-parking-lot-0+owning-ref-dev (= ${binary:Version}), - librust-parking-lot-0+default-dev (= ${binary:Version}), - librust-parking-lot-0.7+owning-ref-dev (= ${binary:Version}), - librust-parking-lot-0.7+default-dev (= ${binary:Version}), - librust-parking-lot-0.7.1+owning-ref-dev (= ${binary:Version}), - librust-parking-lot-0.7.1+default-dev (= ${binary:Version}) + librust-parking-lot-0.10+owning-ref-dev (= ${binary:Version}), + librust-parking-lot-0.10.0+owning-ref-dev (= ${binary:Version}) Description: Compact, efficient std sync primitives - feature "owning_ref" - This metapackage enables feature owning_ref for the Rust parking_lot crate, by + This metapackage enables feature "owning_ref" for the Rust parking_lot crate, + by pulling in any additional dependencies needed by that feature. + +Package: librust-parking-lot+serde-dev +Architecture: any +Multi-Arch: same +Depends: + ${misc:Depends}, + librust-parking-lot-dev (= ${binary:Version}), + librust-lock-api-0.3+serde-dev (>= 0.3.1-~~) +Provides: + librust-parking-lot-0+serde-dev (= ${binary:Version}), + librust-parking-lot-0.10+serde-dev (= ${binary:Version}), + librust-parking-lot-0.10.0+serde-dev (= ${binary:Version}) +Description: Compact, efficient std sync primitives - feature "serde" + This metapackage enables feature "serde" for the Rust parking_lot crate, by pulling in any additional dependencies needed by that feature. 
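(Editor's note on the new "serde" feature packaged above: it mirrors the `test_serde` test added to src/mutex.rs further down in this diff. A minimal sketch, assuming `parking_lot = { version = "0.10", features = ["serde"] }` and `bincode = "1.1.3"` as in the updated Cargo.toml:)

```rust
use parking_lot::Mutex;

fn main() {
    // With the `serde` feature, a Mutex<T: Serialize> serializes as its
    // contents; serializing locks the mutex for the duration.
    let mutex = Mutex::new(vec![0u8, 1, 2]);
    let bytes = bincode::serialize(&mutex).unwrap();
    let restored: Mutex<Vec<u8>> = bincode::deserialize(&bytes).unwrap();
    assert_eq!(*mutex.lock(), *restored.lock());
}
```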
diff -Nru rust-parking-lot-0.7.1/debian/copyright.debcargo.hint rust-parking-lot-0.10.0/debian/copyright.debcargo.hint --- rust-parking-lot-0.7.1/debian/copyright.debcargo.hint 2019-01-22 05:21:27.000000000 +0000 +++ rust-parking-lot-0.10.0/debian/copyright.debcargo.hint 2020-01-08 22:40:18.000000000 +0000 @@ -91,9 +91,9 @@ Files: debian/* Copyright: - 2018-2019 Debian Rust Maintainers - 2018-2019 Sylvestre Ledru - 2018-2019 Wolfgang Silbermayr + 2018-2020 Debian Rust Maintainers + 2018-2020 Sylvestre Ledru + 2018-2020 Wolfgang Silbermayr License: Apache-2.0 or MIT License: Apache-2.0 diff -Nru rust-parking-lot-0.7.1/debian/tests/control rust-parking-lot-0.10.0/debian/tests/control --- rust-parking-lot-0.7.1/debian/tests/control 1970-01-01 00:00:00.000000000 +0000 +++ rust-parking-lot-0.10.0/debian/tests/control 2020-01-08 22:40:18.000000000 +0000 @@ -0,0 +1,29 @@ +Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.10.0 --all-targets --all-features +Features: test-name=@ +Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, @ +Restrictions: allow-stderr, skip-not-installable + +Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.10.0 --all-targets --no-default-features +Features: test-name=librust-parking-lot-dev +Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, @ +Restrictions: allow-stderr, skip-not-installable + +Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.10.0 --all-targets --features deadlock_detection +Features: test-name=librust-parking-lot+deadlock-detection-dev +Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, @ +Restrictions: allow-stderr, skip-not-installable + +Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.10.0 --all-targets --features nightly +Features: test-name=librust-parking-lot+nightly-dev +Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, @ +Restrictions: allow-stderr, skip-not-installable + +Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.10.0 --all-targets --features owning_ref +Features: test-name=librust-parking-lot+owning-ref-dev +Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, @ +Restrictions: allow-stderr, skip-not-installable + +Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.10.0 --all-targets --features serde +Features: test-name=librust-parking-lot+serde-dev +Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, @ +Restrictions: allow-stderr, skip-not-installable diff -Nru rust-parking-lot-0.7.1/debian/watch rust-parking-lot-0.10.0/debian/watch --- rust-parking-lot-0.7.1/debian/watch 2019-01-22 05:21:27.000000000 +0000 +++ rust-parking-lot-0.10.0/debian/watch 2020-01-08 22:40:18.000000000 +0000 @@ -2,4 +2,3 @@ opts=filenamemangle=s/.*\/(.*)\/download/parking_lot-$1\.tar\.gz/g,\ uversionmangle=s/(\d)[_\.\-\+]?((RC|rc|pre|dev|beta|alpha)\d*)$/$1~$2/ \ https://qa.debian.org/cgi-bin/fakeupstream.cgi?upstream=crates.io/parking_lot .*/crates/parking_lot/@ANY_VERSION@/download - diff -Nru rust-parking-lot-0.7.1/README.md rust-parking-lot-0.10.0/README.md --- 
rust-parking-lot-0.7.1/README.md 2018-12-05 15:27:20.000000000 +0000 +++ rust-parking-lot-0.10.0/README.md 2019-11-25 21:09:35.000000000 +0000 @@ -34,7 +34,7 @@ parallelism. 2. Since they consist of just a single atomic variable, have constant initializers and don't need destructors, these primitives can be used as - `static` global variables. The standard library primitives require + `static` global variables. The standard library primitives require dynamic initialization and thus need to be lazily initialized with `lazy_static!`. 3. Uncontended lock acquisition and release is done through fast inline @@ -68,6 +68,9 @@ can be enabled via the `deadlock_detection` feature. 17. `RwLock` supports atomically upgrading an "upgradable" read lock into a write lock. +18. Optional support for [serde](https://docs.serde.rs/serde/). Enable via the + feature `serde`. **NOTE!** this support is for `Mutex`, `ReentrantMutex`, + and `RwLock` only; `Condvar` and `Once` are not currently supported. ## The parking lot @@ -84,9 +87,8 @@ There are a few restrictions when using this library on stable Rust: -- `Mutex` and `Once` will use 1 word of space instead of 1 byte. -- You will have to use `lazy_static!` to statically initialize `Mutex`, - `Condvar` and `RwLock` types instead of `const fn`. +- You will have to use `lazy_static!` or equivalent to statically initialize `Mutex` + and `RwLock` types. They use generics and can't be `const fn`s on stable yet. - `RwLock` will not be able to take advantage of hardware lock elision for readers, which improves performance when there are multiple readers. @@ -99,7 +101,7 @@ ```toml [dependencies] -parking_lot = "0.6" +parking_lot = "0.10" ``` and this to your crate root: @@ -112,7 +114,7 @@ ```toml [dependencies] -parking_lot = {version = "0.6", features = ["nightly"]} +parking_lot = { version = "0.10", features = ["nightly"] } ``` The experimental deadlock detector can be enabled with the @@ -124,7 +126,7 @@ ## Minimum Rust version -The current minimum required Rust version is 1.24. Any change to this is +The current minimum required Rust version is 1.36. Any change to this is considered a breaking change and will require a major version bump. ## License diff -Nru rust-parking-lot-0.7.1/src/condvar.rs rust-parking-lot-0.10.0/src/condvar.rs --- rust-parking-lot-0.7.1/src/condvar.rs 2019-01-01 14:02:15.000000000 +0000 +++ rust-parking-lot-0.10.0/src/condvar.rs 2019-11-25 21:09:35.000000000 +0000 @@ -5,15 +5,16 @@ // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. -use deadlock; -use lock_api::RawMutex as RawMutexTrait; -use mutex::MutexGuard; +use crate::mutex::MutexGuard; +use crate::raw_mutex::{RawMutex, TOKEN_HANDOFF, TOKEN_NORMAL}; +use crate::{deadlock, util}; +use core::{ + fmt, ptr, + sync::atomic::{AtomicPtr, Ordering}, +}; +use lock_api::RawMutex as RawMutex_; use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN}; -use raw_mutex::{RawMutex, TOKEN_HANDOFF, TOKEN_NORMAL}; -use std::sync::atomic::{AtomicPtr, Ordering}; use std::time::{Duration, Instant}; -use std::{fmt, ptr}; -use util; /// A type indicating whether a timed wait on a condition variable returned /// due to a time out or not. @@ -23,7 +24,7 @@ impl WaitTimeoutResult { /// Returns whether the wait was known to have timed out. 
#[inline] - pub fn timed_out(&self) -> bool { + pub fn timed_out(self) -> bool { self.0 } } @@ -77,9 +78,13 @@ /// // wait for the thread to start up /// let &(ref lock, ref cvar) = &*pair; /// let mut started = lock.lock(); -/// while !*started { +/// if !*started { /// cvar.wait(&mut started); /// } +/// // Note that we used an if instead of a while loop above. This is only +/// // possible because parking_lot's Condvar will never spuriously wake up. +/// // This means that wait() will only return after notify_one or notify_all is +/// // called. /// ``` pub struct Condvar { state: AtomicPtr<RawMutex>, } impl Condvar { /// Creates a new condition variable which is ready to be waited on and /// notified. - #[cfg(feature = "nightly")] #[inline] pub const fn new() -> Condvar { Condvar { @@ -96,16 +100,6 @@ } } - /// Creates a new condition variable which is ready to be waited on and - /// notified. - #[cfg(not(feature = "nightly"))] - #[inline] - pub fn new() -> Condvar { - Condvar { - state: AtomicPtr::new(ptr::null_mut()), - } - } - /// Wakes up one blocked thread on this condvar. /// /// Returns whether a thread was woken up. @@ -141,7 +135,6 @@ } #[cold] - #[inline(never)] fn notify_one_slow(&self, mutex: *mut RawMutex) -> bool { unsafe { // Unpark one thread and requeue the rest onto the mutex @@ -203,7 +196,6 @@ } #[cold] - #[inline(never)] fn notify_all_slow(&self, mutex: *mut RawMutex) -> usize { unsafe { // Unpark one thread and requeue the rest onto the mutex @@ -263,7 +255,7 @@ /// This function will panic if another thread is waiting on the `Condvar` /// with a different `Mutex` object. #[inline] - pub fn wait<T: ?Sized>(&self, mutex_guard: &mut MutexGuard<T>) { + pub fn wait<T: ?Sized>(&self, mutex_guard: &mut MutexGuard<'_, T>) { self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, None); } @@ -293,7 +285,7 @@ #[inline] pub fn wait_until<T: ?Sized>( &self, - mutex_guard: &mut MutexGuard<T>, + mutex_guard: &mut MutexGuard<'_, T>, timeout: Instant, ) -> WaitTimeoutResult { self.wait_until_internal( @@ -397,7 +389,7 @@ #[inline] pub fn wait_for<T: ?Sized>( &self, - mutex_guard: &mut MutexGuard<T>, + mutex_guard: &mut MutexGuard<'_, T>, timeout: Duration, ) -> WaitTimeoutResult { let deadline = util::to_deadline(timeout); @@ -413,18 +405,18 @@ } impl fmt::Debug for Condvar { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("Condvar { .. }") } } #[cfg(test)] mod tests { + use crate::{Condvar, Mutex, MutexGuard}; use std::sync::mpsc::channel; use std::sync::Arc; use std::thread; use std::time::{Duration, Instant}; - use {Condvar, Mutex}; #[test] fn smoke() { @@ -626,7 +618,7 @@ rx.recv().unwrap(); let _g = m.lock(); let _guard = PanicGuard(&*c); - let _ = c.wait(&mut m3.lock()); + c.wait(&mut m3.lock()); } #[test] @@ -668,10 +660,393 @@ let mut g = m.lock(); while !c.notify_one() { // Wait for the thread to get into wait() - ::MutexGuard::bump(&mut g); + MutexGuard::bump(&mut g); } // The thread should have been requeued to the mutex, which we wake up now.
drop(g); t.join().unwrap(); } + + #[test] + fn test_issue_129() { + let locks = Arc::new((Mutex::new(()), Condvar::new())); + + let (tx, rx) = channel(); + for _ in 0..4 { + let locks = locks.clone(); + let tx = tx.clone(); + thread::spawn(move || { + let mut guard = locks.0.lock(); + locks.1.wait(&mut guard); + locks.1.wait_for(&mut guard, Duration::from_millis(1)); + locks.1.notify_one(); + tx.send(()).unwrap(); + }); + } + + thread::sleep(Duration::from_millis(100)); + locks.1.notify_one(); + + for _ in 0..4 { + assert_eq!(rx.recv_timeout(Duration::from_millis(500)), Ok(())); + } + } +} + +/// This module contains an integration test that is heavily inspired by WebKit's own integration +/// tests for its own Condvar. +#[cfg(test)] +mod webkit_queue_test { + use crate::{Condvar, Mutex, MutexGuard}; + use std::{collections::VecDeque, sync::Arc, thread, time::Duration}; + + #[derive(Clone, Copy)] + enum Timeout { + Bounded(Duration), + Forever, + } + + #[derive(Clone, Copy)] + enum NotifyStyle { + One, + All, + } + + struct Queue { + items: VecDeque<usize>, + should_continue: bool, + } + + impl Queue { + fn new() -> Self { + Self { + items: VecDeque::new(), + should_continue: true, + } + } + } + + fn wait<T>( + condition: &Condvar, + lock: &mut MutexGuard<'_, T>, + predicate: impl Fn(&mut MutexGuard<'_, T>) -> bool, + timeout: &Timeout, + ) { + while !predicate(lock) { + match timeout { + Timeout::Forever => condition.wait(lock), + Timeout::Bounded(bound) => { + condition.wait_for(lock, *bound); + } + } + } + } + + fn notify(style: NotifyStyle, condition: &Condvar, should_notify: bool) { + match style { + NotifyStyle::One => { + condition.notify_one(); + } + NotifyStyle::All => { + if should_notify { + condition.notify_all(); + } + } + } + } + + fn run_queue_test( + num_producers: usize, + num_consumers: usize, + max_queue_size: usize, + messages_per_producer: usize, + notify_style: NotifyStyle, + timeout: Timeout, + delay: Duration, + ) { + let input_queue = Arc::new(Mutex::new(Queue::new())); + let empty_condition = Arc::new(Condvar::new()); + let full_condition = Arc::new(Condvar::new()); + + let output_vec = Arc::new(Mutex::new(vec![])); + + let consumers = (0..num_consumers) + .map(|_| { + consumer_thread( + input_queue.clone(), + empty_condition.clone(), + full_condition.clone(), + timeout, + notify_style, + output_vec.clone(), + max_queue_size, + ) + }) + .collect::<Vec<_>>(); + let producers = (0..num_producers) + .map(|_| { + producer_thread( + messages_per_producer, + input_queue.clone(), + empty_condition.clone(), + full_condition.clone(), + timeout, + notify_style, + max_queue_size, + ) + }) + .collect::<Vec<_>>(); + + thread::sleep(delay); + + for producer in producers.into_iter() { + producer.join().expect("Producer thread panicked"); + } + + { + let mut input_queue = input_queue.lock(); + input_queue.should_continue = false; + } + empty_condition.notify_all(); + + for consumer in consumers.into_iter() { + consumer.join().expect("Consumer thread panicked"); + } + + let mut output_vec = output_vec.lock(); + assert_eq!(output_vec.len(), num_producers * messages_per_producer); + output_vec.sort(); + for msg_idx in 0..messages_per_producer { + for producer_idx in 0..num_producers { + assert_eq!(msg_idx, output_vec[msg_idx * num_producers + producer_idx]); + } + } + } + + fn consumer_thread( + input_queue: Arc<Mutex<Queue>>, + empty_condition: Arc<Condvar>, + full_condition: Arc<Condvar>, + timeout: Timeout, + notify_style: NotifyStyle, + output_queue: Arc<Mutex<Vec<usize>>>, + max_queue_size: usize, + ) -> thread::JoinHandle<()> {
thread::spawn(move || loop { + let (should_notify, result) = { + let mut queue = input_queue.lock(); + wait( + &*empty_condition, + &mut queue, + |state| -> bool { !state.items.is_empty() || !state.should_continue }, + &timeout, + ); + if queue.items.is_empty() && !queue.should_continue { + return; + } + let should_notify = queue.items.len() == max_queue_size; + let result = queue.items.pop_front(); + std::mem::drop(queue); + (should_notify, result) + }; + notify(notify_style, &*full_condition, should_notify); + + if let Some(result) = result { + output_queue.lock().push(result); + } + }) + } + + fn producer_thread( + num_messages: usize, + queue: Arc<Mutex<Queue>>, + empty_condition: Arc<Condvar>, + full_condition: Arc<Condvar>, + timeout: Timeout, + notify_style: NotifyStyle, + max_queue_size: usize, + ) -> thread::JoinHandle<()> { + thread::spawn(move || { + for message in 0..num_messages { + let should_notify = { + let mut queue = queue.lock(); + wait( + &*full_condition, + &mut queue, + |state| state.items.len() < max_queue_size, + &timeout, + ); + let should_notify = queue.items.is_empty(); + queue.items.push_back(message); + std::mem::drop(queue); + should_notify + }; + notify(notify_style, &*empty_condition, should_notify); + } + }) + } + + macro_rules! run_queue_tests { + ( $( $name:ident( + num_producers: $num_producers:expr, + num_consumers: $num_consumers:expr, + max_queue_size: $max_queue_size:expr, + messages_per_producer: $messages_per_producer:expr, + notification_style: $notification_style:expr, + timeout: $timeout:expr, + delay_seconds: $delay_seconds:expr); + )* ) => { + $(#[test] + fn $name() { + let delay = Duration::from_secs($delay_seconds); + run_queue_test( + $num_producers, + $num_consumers, + $max_queue_size, + $messages_per_producer, + $notification_style, + $timeout, + delay, + ); + })* + }; + } + + run_queue_tests!
{ + sanity_check_queue( + num_producers: 1, + num_consumers: 1, + max_queue_size: 1, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Bounded(Duration::from_secs(1)), + delay_seconds: 0 + ); + sanity_check_queue_timeout( + num_producers: 1, + num_consumers: 1, + max_queue_size: 1, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + new_test_without_timeout_5( + num_producers: 1, + num_consumers: 5, + max_queue_size: 1, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + one_producer_one_consumer_one_slot( + num_producers: 1, + num_consumers: 1, + max_queue_size: 1, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + one_producer_one_consumer_one_slot_timeout( + num_producers: 1, + num_consumers: 1, + max_queue_size: 1, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 1 + ); + one_producer_one_consumer_hundred_slots( + num_producers: 1, + num_consumers: 1, + max_queue_size: 100, + messages_per_producer: 1_000_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_one_consumer_one_slot( + num_producers: 10, + num_consumers: 1, + max_queue_size: 1, + messages_per_producer: 10000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_one_consumer_hundred_slots_notify_all( + num_producers: 10, + num_consumers: 1, + max_queue_size: 100, + messages_per_producer: 10000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_one_consumer_hundred_slots_notify_one( + num_producers: 10, + num_consumers: 1, + max_queue_size: 100, + messages_per_producer: 10000, + notification_style: NotifyStyle::One, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + one_producer_ten_consumers_one_slot( + num_producers: 1, + num_consumers: 10, + max_queue_size: 1, + messages_per_producer: 10000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + one_producer_ten_consumers_hundred_slots_notify_all( + num_producers: 1, + num_consumers: 10, + max_queue_size: 100, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + one_producer_ten_consumers_hundred_slots_notify_one( + num_producers: 1, + num_consumers: 10, + max_queue_size: 100, + messages_per_producer: 100_000, + notification_style: NotifyStyle::One, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_ten_consumers_one_slot( + num_producers: 10, + num_consumers: 10, + max_queue_size: 1, + messages_per_producer: 50000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_ten_consumers_hundred_slots_notify_all( + num_producers: 10, + num_consumers: 10, + max_queue_size: 100, + messages_per_producer: 50000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_ten_consumers_hundred_slots_notify_one( + num_producers: 10, + num_consumers: 10, + max_queue_size: 100, + messages_per_producer: 50000, + notification_style: NotifyStyle::One, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + } } diff -Nru 
rust-parking-lot-0.7.1/src/deadlock.rs rust-parking-lot-0.10.0/src/deadlock.rs --- rust-parking-lot-0.7.1/src/deadlock.rs 2018-12-05 15:27:20.000000000 +0000 +++ rust-parking-lot-0.10.0/src/deadlock.rs 2019-05-03 07:55:05.000000000 +0000 @@ -40,10 +40,15 @@ #[cfg(test)] #[cfg(feature = "deadlock_detection")] mod tests { + use crate::{Mutex, ReentrantMutex, RwLock}; use std::sync::{Arc, Barrier}; use std::thread::{self, sleep}; use std::time::Duration; - use {Mutex, ReentrantMutex, RwLock}; + + // We need to serialize these tests since deadlock detection uses global state + lazy_static::lazy_static! { + static ref DEADLOCK_DETECTION_LOCK: Mutex<()> = Mutex::new(()); + } fn check_deadlock() -> bool { use parking_lot_core::deadlock::check_deadlock; @@ -52,6 +57,8 @@ #[test] fn test_mutex_deadlock() { + let _guard = DEADLOCK_DETECTION_LOCK.lock(); + let m1: Arc<Mutex<()>> = Default::default(); let m2: Arc<Mutex<()>> = Default::default(); let m3: Arc<Mutex<()>> = Default::default(); @@ -95,6 +102,8 @@ #[test] fn test_mutex_deadlock_reentrant() { + let _guard = DEADLOCK_DETECTION_LOCK.lock(); + let m1: Arc<Mutex<()>> = Default::default(); assert!(!check_deadlock()); @@ -112,6 +121,8 @@ #[test] fn test_remutex_deadlock() { + let _guard = DEADLOCK_DETECTION_LOCK.lock(); + let m1: Arc<ReentrantMutex<()>> = Default::default(); let m2: Arc<ReentrantMutex<()>> = Default::default(); let m3: Arc<ReentrantMutex<()>> = Default::default(); @@ -158,6 +169,8 @@ #[test] fn test_rwlock_deadlock() { + let _guard = DEADLOCK_DETECTION_LOCK.lock(); + let m1: Arc<RwLock<()>> = Default::default(); let m2: Arc<RwLock<()>> = Default::default(); let m3: Arc<RwLock<()>> = Default::default(); @@ -199,8 +212,11 @@ assert!(!check_deadlock()); } + #[cfg(rwlock_deadlock_detection_not_supported)] #[test] fn test_rwlock_deadlock_reentrant() { + let _guard = DEADLOCK_DETECTION_LOCK.lock(); + let m1: Arc<RwLock<()>> = Default::default(); assert!(!check_deadlock()); diff -Nru rust-parking-lot-0.7.1/src/elision.rs rust-parking-lot-0.10.0/src/elision.rs --- rust-parking-lot-0.7.1/src/elision.rs 2019-01-01 14:02:15.000000000 +0000 +++ rust-parking-lot-0.10.0/src/elision.rs 2019-10-04 06:52:57.000000000 +0000 @@ -12,17 +12,14 @@ type IntType; // Perform a compare_exchange and start a transaction - fn elision_acquire( - &self, - current: Self::IntType, - new: Self::IntType, - ) -> Result<Self::IntType, Self::IntType>; - // Perform a compare_exchange and end a transaction - fn elision_release( + fn elision_compare_exchange_acquire( &self, current: Self::IntType, new: Self::IntType, ) -> Result<Self::IntType, Self::IntType>; + + // Perform a fetch_sub and end a transaction + fn elision_fetch_sub_release(&self, val: Self::IntType) -> Self::IntType; } // Indicates whether the target architecture supports lock elision @@ -41,22 +38,23 @@ type IntType = usize; #[inline] - fn elision_acquire(&self, _: usize, _: usize) -> Result<usize, usize> { + fn elision_compare_exchange_acquire(&self, _: usize, _: usize) -> Result<usize, usize> { unreachable!(); } #[inline] - fn elision_release(&self, _: usize, _: usize) -> Result<usize, usize> { + fn elision_fetch_sub_release(&self, _: usize) -> usize { unreachable!(); } } -#[cfg(all(feature = "nightly", target_arch = "x86"))] +#[cfg(all(feature = "nightly", any(target_arch = "x86", target_arch = "x86_64")))] impl AtomicElisionExt for AtomicUsize { type IntType = usize; + #[cfg(target_pointer_width = "32")] #[inline] - fn elision_acquire(&self, current: usize, new: usize) -> Result<usize, usize> { + fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result<usize, usize> { unsafe { let prev: usize; asm!("xacquire; lock; cmpxchgl $2, $1" : "={eax}" (prev), "+*m" (self) : "r" (new), "{eax}" (current) : "memory" : "volatile"); if prev == current { Ok(prev) } else { Err(prev) } } } } - - #[inline] - fn elision_release(&self, current: usize, new: usize) -> Result<usize, usize> { -
unsafe { - let prev: usize; - asm!("xrelease; lock; cmpxchgl $2, $1" - : "={eax}" (prev), "+*m" (self) - : "r" (new), "{eax}" (current) - : "memory" - : "volatile"); - if prev == current { - Ok(prev) - } else { - Err(prev) - } - } - } -} - -#[cfg(all( - feature = "nightly", - target_arch = "x86_64", - target_pointer_width = "32" -))] -impl AtomicElisionExt for AtomicUsize { - type IntType = usize; - - #[inline] - fn elision_acquire(&self, current: usize, new: usize) -> Result<usize, usize> { - unsafe { - let prev: usize; - asm!("xacquire; lock; cmpxchgl $2, $1" - : "={rax}" (prev), "+*m" (self) - : "r" (new), "{rax}" (current) - : "memory" - : "volatile"); - if prev == current { - Ok(prev) - } else { - Err(prev) - } - } - } - + #[cfg(target_pointer_width = "64")] #[inline] - fn elision_release(&self, current: usize, new: usize) -> Result<usize, usize> { + fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result<usize, usize> { unsafe { let prev: usize; - asm!("xrelease; lock; cmpxchgl $2, $1" + asm!("xacquire; lock; cmpxchgq $2, $1" : "={rax}" (prev), "+*m" (self) : "r" (new), "{rax}" (current) : "memory" @@ -131,47 +86,31 @@ } } } -} - -#[cfg(all( - feature = "nightly", - target_arch = "x86_64", - target_pointer_width = "64" -))] -impl AtomicElisionExt for AtomicUsize { - type IntType = usize; + #[cfg(target_pointer_width = "32")] #[inline] - fn elision_acquire(&self, current: usize, new: usize) -> Result<usize, usize> { + fn elision_fetch_sub_release(&self, val: usize) -> usize { unsafe { let prev: usize; - asm!("xacquire; lock; cmpxchgq $2, $1" - : "={rax}" (prev), "+*m" (self) - : "r" (new), "{rax}" (current) + asm!("xrelease; lock; xaddl $2, $1" + : "=r" (prev), "+*m" (self) + : "0" (val.wrapping_neg()) : "memory" : "volatile"); - if prev == current { - Ok(prev) - } else { - Err(prev) - } + prev } } - + #[cfg(target_pointer_width = "64")] #[inline] - fn elision_release(&self, current: usize, new: usize) -> Result<usize, usize> { + fn elision_fetch_sub_release(&self, val: usize) -> usize { unsafe { let prev: usize; - asm!("xrelease; lock; cmpxchgq $2, $1" - : "={rax}" (prev), "+*m" (self) - : "r" (new), "{rax}" (current) + asm!("xrelease; lock; xaddq $2, $1" + : "=r" (prev), "+*m" (self) + : "0" (val.wrapping_neg()) : "memory" : "volatile"); - if prev == current { - Ok(prev) - } else { - Err(prev) - } + prev } } } diff -Nru rust-parking-lot-0.7.1/src/lib.rs rust-parking-lot-0.10.0/src/lib.rs --- rust-parking-lot-0.7.1/src/lib.rs 2019-01-01 14:02:15.000000000 +0000 +++ rust-parking-lot-0.10.0/src/lib.rs 2019-10-04 06:54:21.000000000 +0000 @@ -10,13 +10,8 @@ //! standard library. It also provides a `ReentrantMutex` type.
#![warn(missing_docs)] -#![cfg_attr(feature = "nightly", feature(const_fn))] -#![cfg_attr(feature = "nightly", feature(integer_atomics))] +#![warn(rust_2018_idioms)] #![cfg_attr(feature = "nightly", feature(asm))] -#![cfg_attr(feature = "nightly", feature(time_checked_add))] - -extern crate lock_api; -extern crate parking_lot_core; mod condvar; mod elision; @@ -33,13 +28,16 @@ #[cfg(not(feature = "deadlock_detection"))] mod deadlock; -pub use condvar::{Condvar, WaitTimeoutResult}; -pub use mutex::{MappedMutexGuard, Mutex, MutexGuard}; -pub use once::{Once, OnceState, ONCE_INIT}; -pub use raw_mutex::RawMutex; -pub use raw_rwlock::RawRwLock; -pub use remutex::{MappedReentrantMutexGuard, RawThreadId, ReentrantMutex, ReentrantMutexGuard}; -pub use rwlock::{ +pub use self::condvar::{Condvar, WaitTimeoutResult}; +pub use self::mutex::{MappedMutexGuard, Mutex, MutexGuard}; +pub use self::once::{Once, OnceState}; +pub use self::raw_mutex::RawMutex; +pub use self::raw_rwlock::RawRwLock; +pub use self::remutex::{ + MappedReentrantMutexGuard, RawThreadId, ReentrantMutex, ReentrantMutexGuard, +}; +pub use self::rwlock::{ MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard, }; +pub use ::lock_api; diff -Nru rust-parking-lot-0.7.1/src/mutex.rs rust-parking-lot-0.10.0/src/mutex.rs --- rust-parking-lot-0.7.1/src/mutex.rs 2019-01-01 14:02:15.000000000 +0000 +++ rust-parking-lot-0.10.0/src/mutex.rs 2019-11-25 21:09:35.000000000 +0000 @@ -5,8 +5,8 @@ // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. +use crate::raw_mutex::RawMutex; use lock_api; -use raw_mutex::RawMutex; /// A mutual exclusion primitive useful for protecting shared data /// @@ -69,7 +69,7 @@ /// /// let (tx, rx) = channel(); /// for _ in 0..10 { -/// let (data, tx) = (data.clone(), tx.clone()); +/// let (data, tx) = (Arc::clone(&data), tx.clone()); /// thread::spawn(move || { /// // The shared state can only be accessed once the lock is held. 
/// // Our non-atomic increment is safe because we're the only thread @@ -105,11 +105,14 @@ #[cfg(test)] mod tests { + use crate::{Condvar, Mutex}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::mpsc::channel; use std::sync::Arc; use std::thread; - use {Condvar, Mutex}; + + #[cfg(feature = "serde")] + use bincode::{deserialize, serialize}; struct Packet<T>(Arc<(Mutex<T>, Condvar)>); @@ -242,7 +245,7 @@ fn test_mutex_arc_access_in_unwind() { let arc = Arc::new(Mutex::new(1)); let arc2 = arc.clone(); - let _ = thread::spawn(move || -> () { + let _ = thread::spawn(move || { struct Unwinder { i: Arc<Mutex<i32>>, } @@ -284,16 +287,20 @@ let mutex = Mutex::new(vec![0u8, 10]); assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }"); - assert_eq!( - format!("{:#?}", mutex), - "Mutex { - data: [ - 0, - 10 - ] -}" - ); let _lock = mutex.lock(); - assert_eq!(format!("{:?}", mutex), "Mutex { }"); + assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }"); + } + + #[cfg(feature = "serde")] + #[test] + fn test_serde() { + let contents: Vec<u8> = vec![0, 1, 2]; + let mutex = Mutex::new(contents.clone()); + + let serialized = serialize(&mutex).unwrap(); + let deserialized: Mutex<Vec<u8>> = deserialize(&serialized).unwrap(); + + assert_eq!(*(mutex.lock()), *(deserialized.lock())); + assert_eq!(contents, *(deserialized.lock())); } } diff -Nru rust-parking-lot-0.7.1/src/once.rs rust-parking-lot-0.10.0/src/once.rs --- rust-parking-lot-0.7.1/src/once.rs 2018-12-05 15:27:20.000000000 +0000 +++ rust-parking-lot-0.10.0/src/once.rs 2019-11-25 21:09:35.000000000 +0000 @@ -5,26 +5,17 @@ // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. -use std::sync::atomic::{fence, Ordering}; -#[cfg(feature = "nightly")] -use std::sync::atomic::{AtomicU8, ATOMIC_U8_INIT}; -#[cfg(feature = "nightly")] -type U8 = u8; -#[cfg(not(feature = "nightly"))] -use std::sync::atomic::AtomicUsize as AtomicU8; -#[cfg(not(feature = "nightly"))] -use std::sync::atomic::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT; -#[cfg(not(feature = "nightly"))] -type U8 = usize; +use crate::util::UncheckedOptionExt; +use core::{ + fmt, mem, + sync::atomic::{fence, AtomicU8, Ordering}, +}; use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN}; -use std::fmt; -use std::mem; -use util::UncheckedOptionExt; - -const DONE_BIT: U8 = 1; -const POISON_BIT: U8 = 2; -const LOCKED_BIT: U8 = 4; -const PARKED_BIT: U8 = 8; + +const DONE_BIT: u8 = 1; +const POISON_BIT: u8 = 2; +const LOCKED_BIT: u8 = 4; +const PARKED_BIT: u8 = 8; /// Current state of a `Once`. #[derive(Copy, Clone, Eq, PartialEq, Debug)] @@ -48,8 +39,8 @@ /// Once an initialization routine for a `Once` has panicked it will forever /// indicate to future forced initialization routines that it is poisoned. #[inline] - pub fn poisoned(&self) -> bool { - match *self { + pub fn poisoned(self) -> bool { + match self { OnceState::Poisoned => true, _ => false, } @@ -58,8 +49,8 @@ /// Returns whether the associated `Once` has successfully executed a /// closure.
#[inline] - pub fn done(&self) -> bool { - match *self { + pub fn done(self) -> bool { + match self { OnceState::Done => true, _ => false, } @@ -81,9 +72,9 @@ /// # Examples /// /// ``` -/// use parking_lot::{Once, ONCE_INIT}; +/// use parking_lot::Once; /// -/// static START: Once = ONCE_INIT; +/// static START: Once = Once::new(); /// /// START.call_once(|| { /// // run initialization here @@ -91,22 +82,11 @@ /// ``` pub struct Once(AtomicU8); -/// Initialization value for static `Once` values. -pub const ONCE_INIT: Once = Once(ATOMIC_U8_INIT); - impl Once { /// Creates a new `Once` value. - #[cfg(feature = "nightly")] #[inline] pub const fn new() -> Once { - Once(ATOMIC_U8_INIT) - } - - /// Creates a new `Once` value. - #[cfg(not(feature = "nightly"))] - #[inline] - pub fn new() -> Once { - Once(ATOMIC_U8_INIT) + Once(AtomicU8::new(0)) } /// Returns the current state of this `Once`. @@ -141,10 +121,10 @@ /// # Examples /// /// ``` - /// use parking_lot::{Once, ONCE_INIT}; + /// use parking_lot::Once; /// /// static mut VAL: usize = 0; - /// static INIT: Once = ONCE_INIT; + /// static INIT: Once = Once::new(); /// /// // Accessing a `static mut` is unsafe much of the time, but if we do so /// // in a synchronized fashion (e.g. write once or read all) then we're @@ -222,8 +202,7 @@ // currently no way to take an `FnOnce` and call it via virtual dispatch // without some allocation overhead. #[cold] - #[inline(never)] - fn call_once_slow(&self, ignore_poison: bool, f: &mut FnMut(OnceState)) { + fn call_once_slow(&self, ignore_poison: bool, f: &mut dyn FnMut(OnceState)) { let mut spinwait = SpinWait::new(); let mut state = self.0.load(Ordering::Relaxed); loop { @@ -344,7 +323,7 @@ } impl fmt::Debug for Once { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Once") .field("state", &self.state()) .finish() @@ -353,15 +332,14 @@ #[cfg(test)] mod tests { - #[cfg(feature = "nightly")] + use crate::Once; use std::panic; use std::sync::mpsc::channel; use std::thread; - use {Once, ONCE_INIT}; #[test] fn smoke_once() { - static O: Once = ONCE_INIT; + static O: Once = Once::new(); let mut a = 0; O.call_once(|| a += 1); assert_eq!(a, 1); @@ -371,7 +349,7 @@ #[test] fn stampede_once() { - static O: Once = ONCE_INIT; + static O: Once = Once::new(); static mut RUN: bool = false; let (tx, rx) = channel(); @@ -405,10 +383,9 @@ } } - #[cfg(feature = "nightly")] #[test] fn poison_bad() { - static O: Once = ONCE_INIT; + static O: Once = Once::new(); // poison the once let t = panic::catch_unwind(|| { @@ -434,10 +411,9 @@ O.call_once(|| {}); } - #[cfg(feature = "nightly")] #[test] fn wait_for_force_to_finish() { - static O: Once = ONCE_INIT; + static O: Once = Once::new(); // poison the once let t = panic::catch_unwind(|| { @@ -475,14 +451,8 @@ #[test] fn test_once_debug() { - static O: Once = ONCE_INIT; + static O: Once = Once::new(); assert_eq!(format!("{:?}", O), "Once { state: New }"); - assert_eq!( - format!("{:#?}", O), - "Once { - state: New -}" - ); } } diff -Nru rust-parking-lot-0.7.1/src/raw_mutex.rs rust-parking-lot-0.10.0/src/raw_mutex.rs --- rust-parking-lot-0.7.1/src/raw_mutex.rs 2019-01-01 14:02:15.000000000 +0000 +++ rust-parking-lot-0.10.0/src/raw_mutex.rs 2019-11-09 11:27:25.000000000 +0000 @@ -5,22 +5,14 @@ // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. 
-use std::sync::atomic::Ordering; -#[cfg(feature = "nightly")] -use std::sync::atomic::{AtomicU8, ATOMIC_U8_INIT}; -#[cfg(feature = "nightly")] -type U8 = u8; -#[cfg(not(feature = "nightly"))] -use std::sync::atomic::AtomicUsize as AtomicU8; -#[cfg(not(feature = "nightly"))] -use std::sync::atomic::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT; -#[cfg(not(feature = "nightly"))] -type U8 = usize; -use deadlock; -use lock_api::{GuardNoSend, RawMutex as RawMutexTrait, RawMutexFair, RawMutexTimed}; +use crate::{deadlock, util}; +use core::{ + sync::atomic::{AtomicU8, Ordering}, + time::Duration, +}; +use lock_api::{GuardNoSend, RawMutex as RawMutex_}; use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN}; -use std::time::{Duration, Instant}; -use util; +use std::time::Instant; // UnparkToken used to indicate that the target thread should attempt to // lock the mutex again as soon as it is unparked. @@ -30,17 +22,42 @@ // thread directly without unlocking it. pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1); -const LOCKED_BIT: U8 = 1; -const PARKED_BIT: U8 = 2; +/// This bit is set in the `state` of a `RawMutex` when that mutex is locked by some thread. +const LOCKED_BIT: u8 = 0b01; +/// This bit is set in the `state` of a `RawMutex` just before parking a thread. A thread is being +/// parked if it wants to lock the mutex, but it is currently being held by some other thread. +const PARKED_BIT: u8 = 0b10; /// Raw mutex type backed by the parking lot. pub struct RawMutex { + /// This atomic integer holds the current state of the mutex instance. Only the two lowest bits + /// are used. See `LOCKED_BIT` and `PARKED_BIT` for the bitmask for these bits. + /// + /// # State table: + /// + /// PARKED_BIT | LOCKED_BIT | Description + /// 0 | 0 | The mutex is not locked, nor is anyone waiting for it. + /// -----------+------------+------------------------------------------------------------------ + /// 0 | 1 | The mutex is locked by exactly one thread. No other thread is + /// | | waiting for it. + /// -----------+------------+------------------------------------------------------------------ + /// 1 | 0 | The mutex is not locked. One or more threads are parked or about to + /// | | park. At least one of the parked threads is just about to be + /// | | unparked, or a thread heading for parking might abort the park. + /// -----------+------------+------------------------------------------------------------------ + /// 1 | 1 | The mutex is locked by exactly one thread. One or more threads are + /// | | parked or about to park, waiting for the lock to become available. + /// | | In this state, PARKED_BIT is only ever cleared when a bucket lock + /// | | is held (i.e. in a parking_lot_core callback). This ensures that + /// | | we never end up in a situation where there are parked threads but + /// | | PARKED_BIT is not set (which would result in those threads + /// | | potentially never getting woken up).
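+    /// +    /// Worked example (editor's illustration, not part of the upstream comment): +    /// an uncontended `lock()` CASes the state 0b00 -> 0b01. A second thread fails +    /// that CAS, sets PARKED_BIT (0b01 -> 0b11) and parks. The owner's `unlock()` +    /// then fails its fast-path CAS because the state is no longer exactly +    /// LOCKED_BIT, so `unlock_slow()` unparks one thread and either hands the lock +    /// off (TOKEN_HANDOFF, keeping LOCKED_BIT set) or stores 0b10/0b00 depending on +    /// whether more threads remain parked.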
state: AtomicU8, } -unsafe impl RawMutexTrait for RawMutex { +unsafe impl lock_api::RawMutex for RawMutex { const INIT: RawMutex = RawMutex { - state: ATOMIC_U8_INIT, + state: AtomicU8::new(0), }; type GuardMarker = GuardNoSend; @@ -84,7 +101,7 @@ unsafe { deadlock::release_resource(self as *const _ as usize) }; if self .state - .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) + .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; @@ -93,13 +110,13 @@ } } -unsafe impl RawMutexFair for RawMutex { +unsafe impl lock_api::RawMutexFair for RawMutex { #[inline] fn unlock_fair(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; if self .state - .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) + .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; @@ -115,7 +132,7 @@ } } -unsafe impl RawMutexTimed for RawMutex { +unsafe impl lock_api::RawMutexTimed for RawMutex { type Duration = Duration; type Instant = Instant; @@ -184,7 +201,6 @@ } #[cold] - #[inline(never)] fn lock_slow(&self, timeout: Option<Instant>) -> bool { let mut spinwait = SpinWait::new(); let mut state = self.state.load(Ordering::Relaxed); @@ -223,37 +239,41 @@ } // Park our thread until we are woken up by an unlock - unsafe { - let addr = self as *const _ as usize; - let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT; - let before_sleep = || {}; - let timed_out = |_, was_last_thread| { - // Clear the parked bit if we were the last parked thread - if was_last_thread { - self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); - } - }; - match parking_lot_core::park( + let addr = self as *const _ as usize; + let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT; + let before_sleep = || {}; + let timed_out = |_, was_last_thread| { + // Clear the parked bit if we were the last parked thread + if was_last_thread { + self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); + } + }; + // SAFETY: + // * `addr` is an address we control. + // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`. + // * `before_sleep` does not call `park`, nor does it panic. + match unsafe { + parking_lot_core::park( addr, validate, before_sleep, timed_out, DEFAULT_PARK_TOKEN, timeout, ) + } { // The thread that unparked us passed the lock on to us // directly without unlocking it.
+ ParkResult::Unparked(TOKEN_HANDOFF) => return true, - // We were unparked normally, try acquiring the lock again - ParkResult::Unparked(_) => (), + // We were unparked normally, try acquiring the lock again + ParkResult::Unparked(_) => (), - // The validation function failed, try locking again - ParkResult::Invalid => (), + // The validation function failed, try locking again + ParkResult::Invalid => (), - // Timeout expired - ParkResult::TimedOut => return false, - } + // Timeout expired + ParkResult::TimedOut => return false, } // Loop back and try locking again @@ -263,48 +283,40 @@ } #[cold] - #[inline(never)] fn unlock_slow(&self, force_fair: bool) { - // Unlock directly if there are no parked threads - if self - .state - .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) - .is_ok() - { - return; - } - // Unpark one thread and leave the parked bit set if there might // still be parked threads on this address. - unsafe { - let addr = self as *const _ as usize; - let callback = |result: UnparkResult| { - // If we are using a fair unlock then we should keep the - // mutex locked and hand it off to the unparked thread. - if result.unparked_threads != 0 && (force_fair || result.be_fair) { - // Clear the parked bit if there are no more parked - // threads. - if !result.have_more_threads { - self.state.store(LOCKED_BIT, Ordering::Relaxed); - } - return TOKEN_HANDOFF; + let addr = self as *const _ as usize; + let callback = |result: UnparkResult| { + // If we are using a fair unlock then we should keep the + // mutex locked and hand it off to the unparked thread. + if result.unparked_threads != 0 && (force_fair || result.be_fair) { + // Clear the parked bit if there are no more parked + // threads. + if !result.have_more_threads { + self.state.store(LOCKED_BIT, Ordering::Relaxed); } + return TOKEN_HANDOFF; + } - // Clear the locked bit, and the parked bit as well if there - // are no more parked threads. - if result.have_more_threads { - self.state.store(PARKED_BIT, Ordering::Release); - } else { - self.state.store(0, Ordering::Release); - } - TOKEN_NORMAL - }; + // Clear the locked bit, and the parked bit as well if there + // are no more parked threads. + if result.have_more_threads { + self.state.store(PARKED_BIT, Ordering::Release); + } else { + self.state.store(0, Ordering::Release); + } + TOKEN_NORMAL + }; + // SAFETY: + // * `addr` is an address we control. + // * `callback` does not panic or call into any function of `parking_lot`. + unsafe { parking_lot_core::unpark_one(addr, callback); } } #[cold] - #[inline(never)] fn bump_slow(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; self.unlock_slow(true); diff -Nru rust-parking-lot-0.7.1/src/raw_rwlock.rs rust-parking-lot-0.10.0/src/raw_rwlock.rs --- rust-parking-lot-0.7.1/src/raw_rwlock.rs 2019-01-01 14:02:59.000000000 +0000 +++ rust-parking-lot-0.10.0/src/raw_rwlock.rs 2019-10-04 06:54:21.000000000 +0000 @@ -5,47 +5,60 @@ // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. 
-use deadlock;
-use elision::{have_elision, AtomicElisionExt};
-use lock_api::{
- GuardNoSend, RawRwLock as RawRwLockTrait, RawRwLockDowngrade, RawRwLockFair,
- RawRwLockRecursive, RawRwLockRecursiveTimed, RawRwLockTimed, RawRwLockUpgrade,
- RawRwLockUpgradeDowngrade, RawRwLockUpgradeFair, RawRwLockUpgradeTimed,
+use crate::elision::{have_elision, AtomicElisionExt};
+use crate::raw_mutex::{TOKEN_HANDOFF, TOKEN_NORMAL};
+use crate::util;
+use core::{
+ cell::Cell,
+ sync::atomic::{AtomicUsize, Ordering},
+};
+use lock_api::{GuardNoSend, RawRwLock as RawRwLock_, RawRwLockUpgrade};
+use parking_lot_core::{
+ self, deadlock, FilterOp, ParkResult, ParkToken, SpinWait, UnparkResult, UnparkToken,
 };
-use parking_lot_core::{self, FilterOp, ParkResult, ParkToken, SpinWait, UnparkResult};
-use raw_mutex::{TOKEN_HANDOFF, TOKEN_NORMAL};
-use std::cell::Cell;
-use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
 use std::time::{Duration, Instant};
-use util;
-const PARKED_BIT: usize = 0b001;
-const UPGRADING_BIT: usize = 0b010;
-// A shared guard acquires a single guard resource
-const SHARED_GUARD: usize = 0b100;
-const GUARD_COUNT_MASK: usize = !(SHARED_GUARD - 1);
-// An exclusive lock acquires all of guard resource (i.e. it is exclusive)
-const EXCLUSIVE_GUARD: usize = GUARD_COUNT_MASK;
-// An upgradable lock acquires just over half of the guard resource
-// This should be (GUARD_COUNT_MASK + SHARED_GUARD) >> 1, however this might
-// overflow, so we shift before adding (which is okay since the least
-// significant bit is zero for both GUARD_COUNT_MASK and SHARED_GUARD)
-const UPGRADABLE_GUARD: usize = (GUARD_COUNT_MASK >> 1) + (SHARED_GUARD >> 1);
-
-// Token indicating what type of lock queued threads are trying to acquire
-const TOKEN_SHARED: ParkToken = ParkToken(SHARED_GUARD);
-const TOKEN_EXCLUSIVE: ParkToken = ParkToken(EXCLUSIVE_GUARD);
-const TOKEN_UPGRADABLE: ParkToken = ParkToken(UPGRADABLE_GUARD);
-const TOKEN_UPGRADING: ParkToken = ParkToken((EXCLUSIVE_GUARD - UPGRADABLE_GUARD) | UPGRADING_BIT);
+// This reader-writer lock implementation is based on Boost's upgrade_mutex:
+// https://github.com/boostorg/thread/blob/fc08c1fe2840baeeee143440fba31ef9e9a813c8/include/boost/thread/v2/shared_mutex.hpp#L432
+//
+// This implementation uses 2 wait queues, one at key [addr] and one at key
+// [addr + 1]. The primary queue is used for all new waiting threads, and the
+// secondary queue is used by the thread which has acquired WRITER_BIT but is
+// waiting for the remaining readers to exit the lock.
+//
+// This implementation is fair between readers and writers since it uses the
+// order in which threads first started queuing to alternate between read phases
+// and write phases. In particular, it is not vulnerable to writer starvation
+// since readers will block if there is a pending writer.
+
+// There is at least one thread in the main queue.
+const PARKED_BIT: usize = 0b0001;
+// There is a parked thread holding WRITER_BIT. WRITER_BIT must be set.
+const WRITER_PARKED_BIT: usize = 0b0010;
+// A reader is holding an upgradable lock. The reader count must be non-zero and
+// WRITER_BIT must not be set.
+const UPGRADABLE_BIT: usize = 0b0100;
+// If the reader count is zero: a writer is currently holding an exclusive lock.
+// Otherwise: a writer is waiting for the remaining readers to exit the lock.
+const WRITER_BIT: usize = 0b1000;
+// Mask of bits used to count readers.
+const READERS_MASK: usize = !0b1111;
+// Base unit for counting readers.
+const ONE_READER: usize = 0b10000; + +// Token indicating what type of lock a queued thread is trying to acquire +const TOKEN_SHARED: ParkToken = ParkToken(ONE_READER); +const TOKEN_EXCLUSIVE: ParkToken = ParkToken(WRITER_BIT); +const TOKEN_UPGRADABLE: ParkToken = ParkToken(ONE_READER | UPGRADABLE_BIT); /// Raw reader-writer lock type backed by the parking lot. pub struct RawRwLock { state: AtomicUsize, } -unsafe impl RawRwLockTrait for RawRwLock { +unsafe impl lock_api::RawRwLock for RawRwLock { const INIT: RawRwLock = RawRwLock { - state: ATOMIC_USIZE_INIT, + state: AtomicUsize::new(0), }; type GuardMarker = GuardNoSend; @@ -54,23 +67,23 @@ fn lock_exclusive(&self) { if self .state - .compare_exchange_weak(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed) + .compare_exchange_weak(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) .is_err() { let result = self.lock_exclusive_slow(None); debug_assert!(result); } - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); } #[inline] fn try_lock_exclusive(&self) -> bool { if self .state - .compare_exchange(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed) + .compare_exchange(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) .is_ok() { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); true } else { false @@ -79,10 +92,10 @@ #[inline] fn unlock_exclusive(&self) { - unsafe { deadlock::release_resource(self as *const _ as usize) }; + self.deadlock_release(); if self .state - .compare_exchange_weak(EXCLUSIVE_GUARD, 0, Ordering::Release, Ordering::Relaxed) + .compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; @@ -96,7 +109,7 @@ let result = self.lock_shared_slow(false, None); debug_assert!(result); } - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); } #[inline] @@ -107,85 +120,38 @@ self.try_lock_shared_slow(false) }; if result { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); } result } #[inline] fn unlock_shared(&self) { - unsafe { deadlock::release_resource(self as *const _ as usize) }; - let state = self.state.load(Ordering::Relaxed); - if state & PARKED_BIT == 0 - || (state & UPGRADING_BIT == 0 && state & GUARD_COUNT_MASK != SHARED_GUARD) - { - if have_elision() { - if self - .state - .elision_release(state, state - SHARED_GUARD) - .is_ok() - { - return; - } - } else { - if self - .state - .compare_exchange_weak( - state, - state - SHARED_GUARD, - Ordering::Release, - Ordering::Relaxed, - ) - .is_ok() - { - return; - } - } + self.deadlock_release(); + let state = if have_elision() { + self.state.elision_fetch_sub_release(ONE_READER) + } else { + self.state.fetch_sub(ONE_READER, Ordering::Release) + }; + if state & (READERS_MASK | WRITER_PARKED_BIT) == (ONE_READER | WRITER_PARKED_BIT) { + self.unlock_shared_slow(); } - self.unlock_shared_slow(false); } } -unsafe impl RawRwLockFair for RawRwLock { +unsafe impl lock_api::RawRwLockFair for RawRwLock { #[inline] fn unlock_shared_fair(&self) { - unsafe { deadlock::release_resource(self as *const _ as usize) }; - let state = self.state.load(Ordering::Relaxed); - if state & PARKED_BIT == 0 - || (state & UPGRADING_BIT == 0 && state & GUARD_COUNT_MASK != SHARED_GUARD) - { - if have_elision() { - if self - .state - .elision_release(state, state - SHARED_GUARD) - .is_ok() - { - return; - } - } else { - if self - .state - .compare_exchange_weak( - state, - state - 
SHARED_GUARD, - Ordering::Release, - Ordering::Relaxed, - ) - .is_ok() - { - return; - } - } - } - self.unlock_shared_slow(true); + // Shared unlocking is always fair in this implementation. + self.unlock_shared(); } #[inline] fn unlock_exclusive_fair(&self) { - unsafe { deadlock::release_resource(self as *const _ as usize) }; + self.deadlock_release(); if self .state - .compare_exchange_weak(EXCLUSIVE_GUARD, 0, Ordering::Release, Ordering::Relaxed) + .compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; @@ -195,7 +161,9 @@ #[inline] fn bump_shared(&self) { - if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 { + if self.state.load(Ordering::Relaxed) & (READERS_MASK | WRITER_BIT) + == ONE_READER | WRITER_BIT + { self.bump_shared_slow(); } } @@ -208,12 +176,12 @@ } } -unsafe impl RawRwLockDowngrade for RawRwLock { +unsafe impl lock_api::RawRwLockDowngrade for RawRwLock { #[inline] fn downgrade(&self) { let state = self .state - .fetch_sub(EXCLUSIVE_GUARD - SHARED_GUARD, Ordering::Release); + .fetch_add(ONE_READER - WRITER_BIT, Ordering::Release); // Wake up parked shared and upgradable threads if there are any if state & PARKED_BIT != 0 { @@ -222,7 +190,7 @@ } } -unsafe impl RawRwLockTimed for RawRwLock { +unsafe impl lock_api::RawRwLockTimed for RawRwLock { type Duration = Duration; type Instant = Instant; @@ -234,7 +202,7 @@ self.lock_shared_slow(false, util::to_deadline(timeout)) }; if result { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); } result } @@ -247,7 +215,7 @@ self.lock_shared_slow(false, Some(timeout)) }; if result { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); } result } @@ -256,7 +224,7 @@ fn try_lock_exclusive_for(&self, timeout: Duration) -> bool { let result = if self .state - .compare_exchange_weak(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed) + .compare_exchange_weak(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) .is_ok() { true @@ -264,7 +232,7 @@ self.lock_exclusive_slow(util::to_deadline(timeout)) }; if result { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); } result } @@ -273,7 +241,7 @@ fn try_lock_exclusive_until(&self, timeout: Instant) -> bool { let result = if self .state - .compare_exchange_weak(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed) + .compare_exchange_weak(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) .is_ok() { true @@ -281,20 +249,20 @@ self.lock_exclusive_slow(Some(timeout)) }; if result { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); } result } } -unsafe impl RawRwLockRecursive for RawRwLock { +unsafe impl lock_api::RawRwLockRecursive for RawRwLock { #[inline] fn lock_shared_recursive(&self) { if !self.try_lock_shared_fast(true) { let result = self.lock_shared_slow(true, None); debug_assert!(result); } - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); } #[inline] @@ -305,13 +273,13 @@ self.try_lock_shared_slow(true) }; if result { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); } result } } -unsafe impl RawRwLockRecursiveTimed for RawRwLock { +unsafe impl lock_api::RawRwLockRecursiveTimed for RawRwLock { #[inline] fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool { let result = if self.try_lock_shared_fast(true) { @@ -320,7 +288,7 @@ self.lock_shared_slow(true, 
util::to_deadline(timeout)) }; if result { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); } result } @@ -333,20 +301,20 @@ self.lock_shared_slow(true, Some(timeout)) }; if result { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); } result } } -unsafe impl RawRwLockUpgrade for RawRwLock { +unsafe impl lock_api::RawRwLockUpgrade for RawRwLock { #[inline] fn lock_upgradable(&self) { if !self.try_lock_upgradable_fast() { let result = self.lock_upgradable_slow(None); debug_assert!(result); } - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); } #[inline] @@ -357,47 +325,51 @@ self.try_lock_upgradable_slow() }; if result { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); } result } #[inline] fn unlock_upgradable(&self) { - unsafe { deadlock::release_resource(self as *const _ as usize) }; - if self - .state - .compare_exchange_weak(UPGRADABLE_GUARD, 0, Ordering::Release, Ordering::Relaxed) - .is_ok() - { - return; + self.deadlock_release(); + let state = self.state.load(Ordering::Relaxed); + if state & PARKED_BIT == 0 { + if self + .state + .compare_exchange_weak( + state, + state - (ONE_READER | UPGRADABLE_BIT), + Ordering::Release, + Ordering::Relaxed, + ) + .is_ok() + { + return; + } } self.unlock_upgradable_slow(false); } #[inline] fn upgrade(&self) { - if self - .state - .compare_exchange_weak( - UPGRADABLE_GUARD, - EXCLUSIVE_GUARD, - Ordering::Relaxed, - Ordering::Relaxed, - ) - .is_err() - { + let state = self.state.fetch_sub( + (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, + Ordering::Relaxed, + ); + if state & READERS_MASK != ONE_READER { let result = self.upgrade_slow(None); debug_assert!(result); } } + #[inline] fn try_upgrade(&self) -> bool { if self .state .compare_exchange_weak( - UPGRADABLE_GUARD, - EXCLUSIVE_GUARD, + ONE_READER | UPGRADABLE_BIT, + WRITER_BIT, Ordering::Relaxed, Ordering::Relaxed, ) @@ -410,46 +382,53 @@ } } -unsafe impl RawRwLockUpgradeFair for RawRwLock { +unsafe impl lock_api::RawRwLockUpgradeFair for RawRwLock { #[inline] fn unlock_upgradable_fair(&self) { - unsafe { deadlock::release_resource(self as *const _ as usize) }; - if self - .state - .compare_exchange_weak(UPGRADABLE_GUARD, 0, Ordering::Release, Ordering::Relaxed) - .is_ok() - { - return; + self.deadlock_release(); + let state = self.state.load(Ordering::Relaxed); + if state & PARKED_BIT == 0 { + if self + .state + .compare_exchange_weak( + state, + state - (ONE_READER | UPGRADABLE_BIT), + Ordering::Release, + Ordering::Relaxed, + ) + .is_ok() + { + return; + } } - self.unlock_upgradable_slow(true); + self.unlock_upgradable_slow(false); } #[inline] fn bump_upgradable(&self) { - if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 { + if self.state.load(Ordering::Relaxed) == ONE_READER | UPGRADABLE_BIT | PARKED_BIT { self.bump_upgradable_slow(); } } } -unsafe impl RawRwLockUpgradeDowngrade for RawRwLock { +unsafe impl lock_api::RawRwLockUpgradeDowngrade for RawRwLock { #[inline] fn downgrade_upgradable(&self) { - let state = self - .state - .fetch_sub(UPGRADABLE_GUARD - SHARED_GUARD, Ordering::Relaxed); + let state = self.state.fetch_sub(UPGRADABLE_BIT, Ordering::Relaxed); - // Wake up parked shared and upgradable threads if there are any + // Wake up parked upgradable threads if there are any if state & PARKED_BIT != 0 { - self.downgrade_upgradable_slow(state); + self.downgrade_slow(); } } #[inline] fn 
downgrade_to_upgradable(&self) { - let state = self - .state - .fetch_sub(EXCLUSIVE_GUARD - UPGRADABLE_GUARD, Ordering::Release); + let state = self.state.fetch_add( + (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, + Ordering::Release, + ); // Wake up parked shared threads if there are any if state & PARKED_BIT != 0 { @@ -458,7 +437,7 @@ } } -unsafe impl RawRwLockUpgradeTimed for RawRwLock { +unsafe impl lock_api::RawRwLockUpgradeTimed for RawRwLock { #[inline] fn try_lock_upgradable_until(&self, timeout: Instant) -> bool { let result = if self.try_lock_upgradable_fast() { @@ -467,7 +446,7 @@ self.lock_upgradable_slow(Some(timeout)) }; if result { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); } result } @@ -480,23 +459,18 @@ self.lock_upgradable_slow(util::to_deadline(timeout)) }; if result { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + self.deadlock_acquire(); } result } #[inline] fn try_upgrade_until(&self, timeout: Instant) -> bool { - if self - .state - .compare_exchange_weak( - UPGRADABLE_GUARD, - EXCLUSIVE_GUARD, - Ordering::Relaxed, - Ordering::Relaxed, - ) - .is_ok() - { + let state = self.state.fetch_sub( + (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, + Ordering::Relaxed, + ); + if state & READERS_MASK == ONE_READER { true } else { self.upgrade_slow(Some(timeout)) @@ -505,16 +479,11 @@ #[inline] fn try_upgrade_for(&self, timeout: Duration) -> bool { - if self - .state - .compare_exchange_weak( - UPGRADABLE_GUARD, - EXCLUSIVE_GUARD, - Ordering::Relaxed, - Ordering::Relaxed, - ) - .is_ok() - { + let state = self.state.fetch_sub( + (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, + Ordering::Relaxed, + ); + if state & READERS_MASK == ONE_READER { true } else { self.upgrade_slow(util::to_deadline(timeout)) @@ -527,18 +496,25 @@ fn try_lock_shared_fast(&self, recursive: bool) -> bool { let state = self.state.load(Ordering::Relaxed); - // We can't allow grabbing a shared lock while there are parked threads - // since that could lead to writer starvation. - if !recursive && state & PARKED_BIT != 0 { - return false; + // We can't allow grabbing a shared lock if there is a writer, even if + // the writer is still waiting for the remaining readers to exit. + if state & WRITER_BIT != 0 { + // To allow recursive locks, we make an exception and allow readers + // to skip ahead of a pending writer to avoid deadlocking, at the + // cost of breaking the fairness guarantees. + if !recursive || state & READERS_MASK == 0 { + return false; + } } // Use hardware lock elision to avoid cache conflicts when multiple // readers try to acquire the lock. We only do this if the lock is // completely empty since elision handles conflicts poorly. 
if have_elision() && state == 0 {
- self.state.elision_acquire(0, SHARED_GUARD).is_ok()
- } else if let Some(new_state) = state.checked_add(SHARED_GUARD) {
+ self.state
+ .elision_compare_exchange_acquire(0, ONE_READER)
+ .is_ok()
+ } else if let Some(new_state) = state.checked_add(ONE_READER) {
 self.state
 .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed)
 .is_ok()
@@ -547,17 +523,48 @@
 }
 }
+ #[cold]
+ fn try_lock_shared_slow(&self, recursive: bool) -> bool {
+ let mut state = self.state.load(Ordering::Relaxed);
+ loop {
+ // This mirrors the condition in try_lock_shared_fast
+ if state & WRITER_BIT != 0 {
+ if !recursive || state & READERS_MASK == 0 {
+ return false;
+ }
+ }
+ if have_elision() && state == 0 {
+ match self.state.elision_compare_exchange_acquire(0, ONE_READER) {
+ Ok(_) => return true,
+ Err(x) => state = x,
+ }
+ } else {
+ match self.state.compare_exchange_weak(
+ state,
+ state
+ .checked_add(ONE_READER)
+ .expect("RwLock reader count overflow"),
+ Ordering::Acquire,
+ Ordering::Relaxed,
+ ) {
+ Ok(_) => return true,
+ Err(x) => state = x,
+ }
+ }
+ }
+ }
+
 #[inline(always)]
 fn try_lock_upgradable_fast(&self) -> bool {
 let state = self.state.load(Ordering::Relaxed);
- // We can't allow grabbing an upgradable lock while there are parked threads
- // since that could lead to writer starvation.
- if state & PARKED_BIT != 0 {
+ // We can't grab an upgradable lock if there is already a writer or
+ // upgradable reader.
+ if state & (WRITER_BIT | UPGRADABLE_BIT) != 0 {
 return false;
 }
- if let Some(new_state) = state.checked_add(UPGRADABLE_GUARD) {
+ if let Some(new_state) = state.checked_add(ONE_READER | UPGRADABLE_BIT) {
 self.state
 .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed)
 .is_ok()
@@ -567,808 +574,547 @@
 }
 #[cold]
- #[inline(never)]
- fn lock_exclusive_slow(&self, timeout: Option<Instant>) -> bool {
- let mut spinwait = SpinWait::new();
+ fn try_lock_upgradable_slow(&self) -> bool {
 let mut state = self.state.load(Ordering::Relaxed);
 loop {
- // Grab the lock if it isn't locked, even if there are other
- // threads parked.
- if let Some(new_state) = state.checked_add(EXCLUSIVE_GUARD) {
- match self.state.compare_exchange_weak(
- state,
- new_state,
- Ordering::Acquire,
- Ordering::Relaxed,
- ) {
- Ok(_) => return true,
- Err(x) => state = x,
- }
- continue;
+ // This mirrors the condition in try_lock_upgradable_fast
+ if state & (WRITER_BIT | UPGRADABLE_BIT) != 0 {
+ return false;
 }
- // If there are no parked threads and only one reader or writer, try
- // spinning a few times.
- if (state == EXCLUSIVE_GUARD || state == SHARED_GUARD || state == UPGRADABLE_GUARD)
- && spinwait.spin()
- {
- state = self.state.load(Ordering::Relaxed);
- continue;
+ match self.state.compare_exchange_weak(
+ state,
+ state
+ .checked_add(ONE_READER | UPGRADABLE_BIT)
+ .expect("RwLock reader count overflow"),
+ Ordering::Acquire,
+ Ordering::Relaxed,
+ ) {
+ Ok(_) => return true,
+ Err(x) => state = x,
 }
+ }
+ }
- // Park our thread until we are woken up by an unlock
- unsafe {
- let addr = self as *const _ as usize;
- let validate = || {
- let mut state = self.state.load(Ordering::Relaxed);
- loop {
- // If the rwlock is free, abort the park and try to grab
- // it immediately.
- if state & GUARD_COUNT_MASK == 0 {
- return false;
- }
-
- // Nothing to do if the parked bit is already set
- if state & PARKED_BIT != 0 {
- return true;
- }
+ #[cold]
+ fn lock_exclusive_slow(&self, timeout: Option<Instant>) -> bool {
+ let try_lock = |state: &mut usize| {
+ loop {
+ if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 {
+ return false;
+ }
- // Set the parked bit
- match self.state.compare_exchange_weak(
- state,
- state | PARKED_BIT,
- Ordering::Relaxed,
- Ordering::Relaxed,
- ) {
- Ok(_) => return true,
- Err(x) => state = x,
- }
- }
- };
- let before_sleep = || {};
- let timed_out = |_, was_last_thread| {
- // Clear the parked bit if we were the last parked thread
- if was_last_thread {
- self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
- }
- };
- match parking_lot_core::park(
- addr,
- validate,
- before_sleep,
- timed_out,
- TOKEN_EXCLUSIVE,
- timeout,
+ // Grab WRITER_BIT if it isn't set, even if there are parked threads.
+ match self.state.compare_exchange_weak(
+ *state,
+ *state | WRITER_BIT,
+ Ordering::Acquire,
+ Ordering::Relaxed,
 ) {
- // The thread that unparked us passed the lock on to us
- // directly without unlocking it.
- ParkResult::Unparked(TOKEN_HANDOFF) => return true,
-
- // We were unparked normally, try acquiring the lock again
- ParkResult::Unparked(_) => (),
-
- // The validation function failed, try locking again
- ParkResult::Invalid => (),
-
- // Timeout expired
- ParkResult::TimedOut => return false,
+ Ok(_) => return true,
+ Err(x) => *state = x,
 }
 }
+ };
- // Loop back and try locking again
- spinwait.reset();
- state = self.state.load(Ordering::Relaxed);
+ // Step 1: grab exclusive ownership of WRITER_BIT
+ let timed_out = !self.lock_common(
+ timeout,
+ TOKEN_EXCLUSIVE,
+ try_lock,
+ WRITER_BIT | UPGRADABLE_BIT,
+ );
+ if timed_out {
+ return false;
 }
+
+ // Step 2: wait for all remaining readers to exit the lock.
+ self.wait_for_readers(timeout, 0)
 }
 #[cold]
- #[inline(never)]
 fn unlock_exclusive_slow(&self, force_fair: bool) {
- // Unlock directly if there are no parked threads
- if self
- .state
- .compare_exchange(EXCLUSIVE_GUARD, 0, Ordering::Release, Ordering::Relaxed)
- .is_ok()
- {
- return;
- }
-
 // Unpark one thread and leave the parked bit set if there might
 // still be parked threads on this address.
- unsafe {
- let addr = self as *const _ as usize;
- let callback = |result: UnparkResult| {
- // If we are using a fair unlock then we should keep the
- // mutex locked and hand it off to the unparked thread.
- if result.unparked_threads != 0 && (force_fair || result.be_fair) {
- // Clear the parked bit if there are no more parked
- // threads.
- if !result.have_more_threads {
- self.state.store(LOCKED_BIT, Ordering::Relaxed);
- }
- return TOKEN_HANDOFF;
+ let addr = self as *const _ as usize;
+ let callback = |result: UnparkResult| {
+ // If we are using a fair unlock then we should keep the
+ // mutex locked and hand it off to the unparked thread.
+ if result.unparked_threads != 0 && (force_fair || result.be_fair) {
+ // Clear the parked bit if there are no more parked
+ // threads.
+ if !result.have_more_threads {
+ self.state.store(LOCKED_BIT, Ordering::Relaxed);
 }
+ return TOKEN_HANDOFF;
+ }
- // Clear the locked bit, and the parked bit as well if there
- // are no more parked threads.
- if result.have_more_threads {
- self.state.store(PARKED_BIT, Ordering::Release);
- } else {
- self.state.store(0, Ordering::Release);
- }
- TOKEN_NORMAL
- };
+ // Clear the locked bit, and the parked bit as well if there
+ // are no more parked threads.
+ if result.have_more_threads {
+ self.state.store(PARKED_BIT, Ordering::Release);
+ } else {
+ self.state.store(0, Ordering::Release);
+ }
+ TOKEN_NORMAL
+ };
+ // SAFETY:
+ // * `addr` is an address we control.
+ // * `callback` does not panic or call into any function of `parking_lot`.
+ unsafe {
 parking_lot_core::unpark_one(addr, callback);
 }
 }
 #[cold]
- #[inline(never)]
 fn bump_slow(&self) {
 unsafe { deadlock::release_resource(self as *const _ as usize) };
 self.unlock_slow(true);
diff -Nru rust-parking-lot-0.7.1/src/raw_rwlock.rs rust-parking-lot-0.10.0/src/raw_rwlock.rs
--- rust-parking-lot-0.7.1/src/raw_rwlock.rs 2019-01-01 14:02:59.000000000 +0000
+++ rust-parking-lot-0.10.0/src/raw_rwlock.rs 2019-10-04 06:54:21.000000000 +0000
@@ -5,47 +5,60 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
+ if result.have_more_threads {
+ self.state.store(PARKED_BIT, Ordering::Release);
 } else {
- // Clear the parked bit if there are no more parked threads.
- if result.have_more_threads {
- self.state.store(PARKED_BIT, Ordering::Release);
- } else {
- self.state.store(0, Ordering::Release);
- }
- TOKEN_NORMAL
+ self.state.store(0, Ordering::Release);
 }
- };
- parking_lot_core::unpark_filter(addr, filter, callback);
+ TOKEN_NORMAL
+ }
+ };
+ // SAFETY: `callback` does not panic or call into any function of `parking_lot`.
+ unsafe {
+ self.wake_parked_threads(0, callback);
 }
 }
 #[cold]
- #[inline(never)]
- fn downgrade_slow(&self) {
- unsafe {
- let addr = self as *const _ as usize;
- let mut guard_count = SHARED_GUARD;
- let filter = |ParkToken(token)| -> FilterOp {
- match guard_count.checked_add(token) {
- Some(new_guard_count) => {
- guard_count = new_guard_count;
- FilterOp::Unpark
+ fn lock_shared_slow(&self, recursive: bool, timeout: Option<Instant>) -> bool {
+ let try_lock = |state: &mut usize| {
+ let mut spinwait_shared = SpinWait::new();
+ loop {
+ // Use hardware lock elision to avoid cache conflicts when multiple
+ // readers try to acquire the lock. We only do this if the lock is
+ // completely empty since elision handles conflicts poorly.
+ if have_elision() && *state == 0 {
+ match self.state.elision_compare_exchange_acquire(0, ONE_READER) {
+ Ok(_) => return true,
+ Err(x) => *state = x,
 }
- None => FilterOp::Stop,
 }
- };
- let callback = |result: UnparkResult| {
- // Clear the parked bit if there no more parked threads
- if !result.have_more_threads {
- self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
+
+ // This is the same condition as try_lock_shared_fast
+ if *state & WRITER_BIT != 0 {
+ if !recursive || *state & READERS_MASK == 0 {
+ return false;
+ }
 }
- TOKEN_NORMAL
- };
- parking_lot_core::unpark_filter(addr, filter, callback);
- }
+
+ if self
+ .state
+ .compare_exchange_weak(
+ *state,
+ state
+ .checked_add(ONE_READER)
+ .expect("RwLock reader count overflow"),
+ Ordering::Acquire,
+ Ordering::Relaxed,
+ )
+ .is_ok()
+ {
+ return true;
+ }
+
+ // If there is high contention on the reader count then we want
+ // to leave some time between attempts to acquire the lock to
+ // let other threads make progress.
+ spinwait_shared.spin_no_yield();
+ *state = self.state.load(Ordering::Relaxed);
+ }
+ };
+ self.lock_common(timeout, TOKEN_SHARED, try_lock, WRITER_BIT)
 }
 #[cold]
- #[inline(never)]
- fn downgrade_to_upgradable_slow(&self) {
+ fn unlock_shared_slow(&self) {
+ // At this point WRITER_PARKED_BIT is set and READERS_MASK is empty. We
+ // just need to wake up a potentially sleeping pending writer.
+ // Using the 2nd key at addr + 1
+ let addr = self as *const _ as usize + 1;
+ let callback = |_result: UnparkResult| {
+ // Clear the WRITER_PARKED_BIT here since there can only be one
+ // parked writer thread.
+ self.state.fetch_and(!WRITER_PARKED_BIT, Ordering::Relaxed);
+ TOKEN_NORMAL
+ };
+ // SAFETY:
+ // * `addr` is an address we control.
+ // * `callback` does not panic or call into any function of `parking_lot`.
 unsafe {
- let addr = self as *const _ as usize;
- let mut guard_count = UPGRADABLE_GUARD;
- let filter = |ParkToken(token)| -> FilterOp {
- match guard_count.checked_add(token) {
- Some(new_guard_count) => {
- guard_count = new_guard_count;
- FilterOp::Unpark
- }
- None => FilterOp::Stop,
- }
- };
- let callback = |result: UnparkResult| {
- // Clear the parked bit if there no more parked threads
- if !result.have_more_threads {
- self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
- }
- TOKEN_NORMAL
- };
- parking_lot_core::unpark_filter(addr, filter, callback);
+ parking_lot_core::unpark_one(addr, callback);
 }
 }
 #[cold]
- #[inline(never)]
- fn lock_shared_slow(&self, recursive: bool, timeout: Option<Instant>) -> bool {
- let mut spinwait = SpinWait::new();
- let mut spinwait_shared = SpinWait::new();
- let mut state = self.state.load(Ordering::Relaxed);
- let mut unparked = false;
- loop {
- // Use hardware lock elision to avoid cache conflicts when multiple
- // readers try to acquire the lock. We only do this if the lock is
- // completely empty since elision handles conflicts poorly.
- if have_elision() && state == 0 {
- match self.state.elision_acquire(0, SHARED_GUARD) {
- Ok(_) => return true,
- Err(x) => state = x,
+ fn lock_upgradable_slow(&self, timeout: Option<Instant>) -> bool {
+ let try_lock = |state: &mut usize| {
+ let mut spinwait_shared = SpinWait::new();
+ loop {
+ if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 {
+ return false;
 }
- }
-
- // Grab the lock if there are no exclusive threads locked or
- // waiting. However if we were unparked then we are allowed to grab
- // the lock even if there are pending exclusive threads.
- if unparked || recursive || state & PARKED_BIT == 0 {
- if let Some(new_state) = state.checked_add(SHARED_GUARD) {
- if self
- .state
- .compare_exchange_weak(
- state,
- new_state,
- Ordering::Acquire,
- Ordering::Relaxed,
- )
- .is_ok()
- {
- return true;
- }
-
- // If there is high contention on the reader count then we want
- // to leave some time between attempts to acquire the lock to
- // let other threads make progress.
- spinwait_shared.spin_no_yield();
- state = self.state.load(Ordering::Relaxed);
- continue;
- } else {
- // We were unparked spuriously, reset unparked flag.
- unparked = false;
- }
- }
-
- // If there are no parked threads, try spinning a few times
- if state & PARKED_BIT == 0 && spinwait.spin() {
- state = self.state.load(Ordering::Relaxed);
- continue;
- }
-
- // Park our thread until we are woken up by an unlock
- unsafe {
- let addr = self as *const _ as usize;
- let validate = || {
- let mut state = self.state.load(Ordering::Relaxed);
- loop {
- // Nothing to do if the parked bit is already set
- if state & PARKED_BIT != 0 {
- return true;
- }
-
- // If the parked bit is not set then it means we are at
- // the front of the queue. If there is space for another
- // lock then we should abort the park and try acquiring
- // the lock again.
- if state & GUARD_COUNT_MASK != GUARD_COUNT_MASK { - return false; - } - - // Set the parked bit - match self.state.compare_exchange_weak( - state, - state | PARKED_BIT, - Ordering::Relaxed, - Ordering::Relaxed, - ) { - Ok(_) => return true, - Err(x) => state = x, - } - } - }; - let before_sleep = || {}; - let timed_out = |_, was_last_thread| { - // Clear the parked bit if we were the last parked thread - if was_last_thread { - self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); - } - }; - match parking_lot_core::park( - addr, - validate, - before_sleep, - timed_out, - TOKEN_SHARED, - timeout, - ) { - // The thread that unparked us passed the lock on to us - // directly without unlocking it. - ParkResult::Unparked(TOKEN_HANDOFF) => return true, - - // We were unparked normally, try acquiring the lock again - ParkResult::Unparked(_) => (), - - // The validation function failed, try locking again - ParkResult::Invalid => (), - // Timeout expired - ParkResult::TimedOut => return false, - } - } - - // Loop back and try locking again - spinwait.reset(); - spinwait_shared.reset(); - state = self.state.load(Ordering::Relaxed); - unparked = true; - } - } - - #[cold] - #[inline(never)] - fn try_lock_shared_slow(&self, recursive: bool) -> bool { - let mut state = self.state.load(Ordering::Relaxed); - loop { - if !recursive && state & PARKED_BIT != 0 { - return false; - } - if have_elision() && state == 0 { - match self.state.elision_acquire(0, SHARED_GUARD) { - Ok(_) => return true, - Err(x) => state = x, - } - } else { - match state.checked_add(SHARED_GUARD) { - Some(new_state) => match self.state.compare_exchange_weak( - state, - new_state, + if self + .state + .compare_exchange_weak( + *state, + state + .checked_add(ONE_READER | UPGRADABLE_BIT) + .expect("RwLock reader count overflow"), Ordering::Acquire, Ordering::Relaxed, - ) { - Ok(_) => return true, - Err(x) => state = x, - }, - None => return false, + ) + .is_ok() + { + return true; } + + // If there is high contention on the reader count then we want + // to leave some time between attempts to acquire the lock to + // let other threads make progress. + spinwait_shared.spin_no_yield(); + *state = self.state.load(Ordering::Relaxed); } - } + }; + self.lock_common( + timeout, + TOKEN_UPGRADABLE, + try_lock, + WRITER_BIT | UPGRADABLE_BIT, + ) } #[cold] - #[inline(never)] - fn unlock_shared_slow(&self, force_fair: bool) { + fn unlock_upgradable_slow(&self, force_fair: bool) { + // Just release the lock if there are no parked threads. let mut state = self.state.load(Ordering::Relaxed); - loop { - // Just release the lock if there are no parked thread or if we are - // not the last shared thread. - if state & PARKED_BIT == 0 - || (state & UPGRADING_BIT == 0 && state & GUARD_COUNT_MASK != SHARED_GUARD) - || (state & UPGRADING_BIT != 0 - && state & GUARD_COUNT_MASK != UPGRADABLE_GUARD + SHARED_GUARD) - { - match self.state.compare_exchange_weak( - state, - state - SHARED_GUARD, - Ordering::Release, - Ordering::Relaxed, - ) { - Ok(_) => return, - Err(x) => state = x, - } - continue; + while state & PARKED_BIT == 0 { + match self.state.compare_exchange_weak( + state, + state - (ONE_READER | UPGRADABLE_BIT), + Ordering::Release, + Ordering::Relaxed, + ) { + Ok(_) => return, + Err(x) => state = x, } - - break; } - // There are threads to unpark. If there is a thread waiting to be - // upgraded, we find that thread and let it upgrade, otherwise we - // unpark threads up to the guard capacity. 
Note that there is a
- potential race condition here: another thread might grab a shared
- lock between now and when we actually release our lock.
- let additional_guards = Cell::new(0usize);
- let has_upgraded = Cell::new(false);
- unsafe {
- let addr = self as *const _ as usize;
- let filter = |ParkToken(token)| -> FilterOp {
- // We need to check UPGRADING_BIT while holding the bucket lock,
- // otherwise we might miss a thread trying to upgrade.
- if self.state.load(Ordering::Relaxed) & UPGRADING_BIT == 0 {
- match additional_guards.get().checked_add(token) {
- Some(x) => {
- additional_guards.set(x);
- FilterOp::Unpark
- }
- None => FilterOp::Stop,
- }
- } else if has_upgraded.get() {
- FilterOp::Stop
- } else {
- if token & UPGRADING_BIT != 0 {
- additional_guards.set(token & !UPGRADING_BIT);
- has_upgraded.set(true);
- FilterOp::Unpark
+ // There are threads to unpark. Try to unpark as many as we can.
+ let callback = |new_state, result: UnparkResult| {
+ // If we are using a fair unlock then we should keep the
+ // rwlock locked and hand it off to the unparked threads.
+ let mut state = self.state.load(Ordering::Relaxed);
+ if force_fair || result.be_fair {
+ // Fall back to normal unpark on overflow. Panicking is
+ // not allowed in parking_lot callbacks.
+ while let Some(mut new_state) =
+ (state - (ONE_READER | UPGRADABLE_BIT)).checked_add(new_state)
+ {
+ if result.have_more_threads {
+ new_state |= PARKED_BIT;
 } else {
- FilterOp::Skip
- }
- }
- };
- let callback = |result: UnparkResult| {
- let mut state = self.state.load(Ordering::Relaxed);
- loop {
- // Release our shared lock
- let mut new_state = state - SHARED_GUARD;
-
- // Clear the parked bit if there are no more threads in
- // the queue.
- if !result.have_more_threads {
 new_state &= !PARKED_BIT;
 }
-
- // Clear the upgrading bit if we are upgrading a thread.
- if has_upgraded.get() {
- new_state &= !UPGRADING_BIT;
- }
-
- // Consider using fair unlocking. If we are, then we should set
- // the state to the new value and tell the threads that we are
- // handing the lock directly.
- let token = if result.unparked_threads != 0 && (force_fair || result.be_fair) {
- match new_state.checked_add(additional_guards.get()) {
- Some(x) => {
- new_state = x;
- TOKEN_HANDOFF
- }
- None => TOKEN_NORMAL,
- }
- } else {
- TOKEN_NORMAL
- };
-
 match self.state.compare_exchange_weak(
 state,
 new_state,
- Ordering::Release,
+ Ordering::Relaxed,
 Ordering::Relaxed,
 ) {
- Ok(_) => return token,
+ Ok(_) => return TOKEN_HANDOFF,
 Err(x) => state = x,
 }
 }
- };
- parking_lot_core::unpark_filter(addr, filter, callback);
- }
- }
-
- #[cold]
- #[inline(never)]
- fn lock_upgradable_slow(&self, timeout: Option<Instant>) -> bool {
- let mut spinwait = SpinWait::new();
- let mut spinwait_shared = SpinWait::new();
- let mut state = self.state.load(Ordering::Relaxed);
- let mut unparked = false;
- loop {
- // Grab the lock if there are no exclusive or upgradable threads
- // locked or waiting. However if we were unparked then we are
- // allowed to grab the lock even if there are pending exclusive threads.
- if unparked || state & PARKED_BIT == 0 {
- if let Some(new_state) = state.checked_add(UPGRADABLE_GUARD) {
- if self
- .state
- .compare_exchange_weak(
- state,
- new_state,
- Ordering::Acquire,
- Ordering::Relaxed,
- )
- .is_ok()
- {
- return true;
- }
- // If there is high contention on the reader count then we want
- // to leave some time between attempts to acquire the lock to
- // let other threads make progress.
- spinwait_shared.spin_no_yield(); - state = self.state.load(Ordering::Relaxed); - continue; + // Otherwise just release the upgradable lock and update PARKED_BIT. + loop { + let mut new_state = state - (ONE_READER | UPGRADABLE_BIT); + if result.have_more_threads { + new_state |= PARKED_BIT; } else { - // We were unparked spuriously, reset unparked flag. - unparked = false; + new_state &= !PARKED_BIT; } - } - - // If there are no parked threads, try spinning a few times - if state & PARKED_BIT == 0 && spinwait.spin() { - state = self.state.load(Ordering::Relaxed); - continue; - } - - // Park our thread until we are woken up by an unlock - unsafe { - let addr = self as *const _ as usize; - let validate = || { - let mut state = self.state.load(Ordering::Relaxed); - loop { - // Nothing to do if the parked bit is already set - if state & PARKED_BIT != 0 { - return true; - } - - // If the parked bit is not set then it means we are at - // the front of the queue. If there is space for an - // upgradable lock then we should abort the park and try - // acquiring the lock again. - if state & UPGRADABLE_GUARD != UPGRADABLE_GUARD { - return false; - } - - // Set the parked bit - match self.state.compare_exchange_weak( - state, - state | PARKED_BIT, - Ordering::Relaxed, - Ordering::Relaxed, - ) { - Ok(_) => return true, - Err(x) => state = x, - } - } - }; - let before_sleep = || {}; - let timed_out = |_, was_last_thread| { - // Clear the parked bit if we were the last parked thread - if was_last_thread { - self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); - } - }; - match parking_lot_core::park( - addr, - validate, - before_sleep, - timed_out, - TOKEN_UPGRADABLE, - timeout, + match self.state.compare_exchange_weak( + state, + new_state, + Ordering::Relaxed, + Ordering::Relaxed, ) { - // The thread that unparked us passed the lock on to us - // directly without unlocking it. - ParkResult::Unparked(TOKEN_HANDOFF) => return true, - - // We were unparked normally, try acquiring the lock again - ParkResult::Unparked(_) => (), - - // The validation function failed, try locking again - ParkResult::Invalid => (), - - // Timeout expired - ParkResult::TimedOut => return false, + Ok(_) => return TOKEN_NORMAL, + Err(x) => state = x, } } - - // Loop back and try locking again - spinwait.reset(); - spinwait_shared.reset(); - state = self.state.load(Ordering::Relaxed); - unparked = true; + }; + // SAFETY: `callback` does not panic or call into any function of `parking_lot`. + unsafe { + self.wake_parked_threads(0, callback); } } #[cold] - #[inline(never)] - fn try_lock_upgradable_slow(&self) -> bool { + fn try_upgrade_slow(&self) -> bool { let mut state = self.state.load(Ordering::Relaxed); loop { - if state & PARKED_BIT != 0 { + if state & READERS_MASK != ONE_READER { return false; } - - match state.checked_add(UPGRADABLE_GUARD) { - Some(new_state) => match self.state.compare_exchange_weak( - state, - new_state, - Ordering::Acquire, - Ordering::Relaxed, - ) { - Ok(_) => return true, - Err(x) => state = x, - }, - None => return false, + match self.state.compare_exchange_weak( + state, + state - (ONE_READER | UPGRADABLE_BIT) + WRITER_BIT, + Ordering::Relaxed, + Ordering::Relaxed, + ) { + Ok(_) => return true, + Err(x) => state = x, } } } #[cold] - #[inline(never)] - fn unlock_upgradable_slow(&self, force_fair: bool) { - let mut state = self.state.load(Ordering::Relaxed); - loop { - // Just release the lock if there are no parked threads. 
- if state & PARKED_BIT == 0 {
- match self.state.compare_exchange_weak(
- state,
- state - UPGRADABLE_GUARD,
- Ordering::Release,
- Ordering::Relaxed,
- ) {
- Ok(_) => return,
- Err(x) => state = x,
- }
- continue;
- }
-
- break;
- }
+ fn upgrade_slow(&self, timeout: Option<Instant>) -> bool {
+ self.wait_for_readers(timeout, ONE_READER | UPGRADABLE_BIT)
+ }
- // There are threads to unpark. We unpark threads up to the guard capacity.
- let additional_guards = Cell::new(0usize);
+ #[cold]
+ fn downgrade_slow(&self) {
+ // We only reach this point if PARKED_BIT is set.
+ let callback = |_, result: UnparkResult| {
+ // Clear the parked bit if there are no more parked threads
+ if !result.have_more_threads {
+ self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
+ }
+ TOKEN_NORMAL
+ };
+ // SAFETY: `callback` does not panic or call into any function of `parking_lot`.
 unsafe {
- let addr = self as *const _ as usize;
- let filter = |ParkToken(token)| -> FilterOp {
- match additional_guards.get().checked_add(token) {
- Some(x) => {
- additional_guards.set(x);
- FilterOp::Unpark
- }
- None => FilterOp::Stop,
- }
- };
- let callback = |result: UnparkResult| {
- let mut state = self.state.load(Ordering::Relaxed);
- loop {
- // Release our upgradable lock
- let mut new_state = state - UPGRADABLE_GUARD;
-
- // Clear the parked bit if there are no more threads in
- // the queue
- if !result.have_more_threads {
- new_state &= !PARKED_BIT;
- }
-
- // Consider using fair unlocking. If we are, then we should set
- // the state to the new value and tell the threads that we are
- // handing the lock directly.
- let token = if result.unparked_threads != 0 && (force_fair || result.be_fair) {
- match new_state.checked_add(additional_guards.get()) {
- Some(x) => {
- new_state = x;
- TOKEN_HANDOFF
- }
- None => TOKEN_NORMAL,
- }
- } else {
- TOKEN_NORMAL
- };
-
- match self.state.compare_exchange_weak(
- state,
- new_state,
- Ordering::Release,
- Ordering::Relaxed,
- ) {
- Ok(_) => return token,
- Err(x) => state = x,
- }
- }
- };
- parking_lot_core::unpark_filter(addr, filter, callback);
+ self.wake_parked_threads(ONE_READER, callback);
 }
 }
 #[cold]
- #[inline(never)]
- fn downgrade_upgradable_slow(&self, state: usize) {
+ fn downgrade_to_upgradable_slow(&self) {
+ // We only reach this point if PARKED_BIT is set.
+ let callback = |_, result: UnparkResult| {
+ // Clear the parked bit if there are no more parked threads
+ if !result.have_more_threads {
+ self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
+ }
+ TOKEN_NORMAL
+ };
+ // SAFETY: `callback` does not panic or call into any function of `parking_lot`.
 unsafe {
- let addr = self as *const _ as usize;
- let mut guard_count = (state & GUARD_COUNT_MASK) - UPGRADABLE_GUARD;
- let filter = |ParkToken(token)| -> FilterOp {
- match guard_count.checked_add(token) {
- Some(x) => {
- guard_count = x;
- FilterOp::Unpark
- }
- None => FilterOp::Stop,
- }
- };
- let callback = |result: UnparkResult| {
- // Clear the parked bit if there no more parked threads
- if !result.have_more_threads {
- self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
- }
- TOKEN_NORMAL
- };
- parking_lot_core::unpark_filter(addr, filter, callback);
+ self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback);
 }
 }
 #[cold]
- #[inline(never)]
- fn try_upgrade_slow(&self) -> bool {
+ fn bump_shared_slow(&self) {
+ self.unlock_shared();
+ self.lock_shared();
+ }
+
+ #[cold]
+ fn bump_exclusive_slow(&self) {
+ self.deadlock_release();
+ self.unlock_exclusive_slow(true);
+ self.lock_exclusive();
+ }
+
+ #[cold]
+ fn bump_upgradable_slow(&self) {
+ self.deadlock_release();
+ self.unlock_upgradable_slow(true);
+ self.lock_upgradable();
+ }
+
+ /// Common code for waking up parked threads after releasing WRITER_BIT or
+ /// UPGRADABLE_BIT.
+ ///
+ /// # Safety
+ ///
+ /// `callback` must uphold the requirements of the `callback` parameter to
+ /// `parking_lot_core::unpark_filter`, meaning no panics or calls into any function in
+ /// `parking_lot`.
+ #[inline]
+ unsafe fn wake_parked_threads(
+ &self,
+ new_state: usize,
+ callback: impl FnOnce(usize, UnparkResult) -> UnparkToken,
+ ) {
+ // We must wake up at least one upgrader or writer if there is one,
+ // otherwise they may end up parked indefinitely since unlock_shared
+ // does not call wake_parked_threads.
+ let new_state = Cell::new(new_state);
+ let addr = self as *const _ as usize;
+ let filter = |ParkToken(token)| {
+ let s = new_state.get();
+
+ // If we are waking up a writer, don't wake anything else.
+ if s & WRITER_BIT != 0 {
+ return FilterOp::Stop;
+ }
+
+ // Otherwise wake *all* readers and one upgrader/writer.
+ if token & (UPGRADABLE_BIT | WRITER_BIT) != 0 && s & UPGRADABLE_BIT != 0 {
+ // Skip writers and upgradable readers if we already have
+ // a writer/upgradable reader.
+ FilterOp::Skip
+ } else {
+ new_state.set(s + token);
+ FilterOp::Unpark
+ }
+ };
+ let callback = |result| callback(new_state.get(), result);
+ // SAFETY:
+ // * `addr` is an address we control.
+ // * `filter` does not panic or call into any function of `parking_lot`.
+ // * `callback` safety is the caller's responsibility
+ parking_lot_core::unpark_filter(addr, filter, callback);
+ }
+
+ // Common code for waiting for readers to exit the lock after acquiring
+ // WRITER_BIT.
+ #[inline]
+ fn wait_for_readers(&self, timeout: Option<Instant>, prev_value: usize) -> bool {
+ // At this point WRITER_BIT is already set, we just need to wait for the
+ // remaining readers to exit the lock.
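// The loop below spins briefly while the reader count drains, sets
// WRITER_PARKED_BIT, then parks on the secondary queue at addr + 1 until the
// last exiting reader (see unlock_shared_slow above) wakes it. On timeout it
// adds prev_value.wrapping_sub(WRITER_BIT | WRITER_PARKED_BIT) back onto the
// state to undo the acquisition, and wakes the main queue if needed so that
// no waiter is left stranded behind a writer that gave up.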
+ let mut spinwait = SpinWait::new();
 let mut state = self.state.load(Ordering::Relaxed);
- loop {
- match state.checked_add(EXCLUSIVE_GUARD - SHARED_GUARD) {
- Some(new_state) => match self.state.compare_exchange_weak(
+ while state & READERS_MASK != 0 {
+ // Spin a few times to wait for readers to exit
+ if spinwait.spin() {
+ state = self.state.load(Ordering::Relaxed);
+ continue;
+ }
+
+ // Set the parked bit
+ if state & WRITER_PARKED_BIT == 0 {
+ if let Err(x) = self.state.compare_exchange_weak(
 state,
- new_state,
+ state | WRITER_PARKED_BIT,
 Ordering::Relaxed,
 Ordering::Relaxed,
 ) {
- Ok(_) => return true,
- Err(x) => state = x,
- },
- None => return false,
+ state = x;
+ continue;
+ }
+ }
+
+ // Park our thread until we are woken up by an unlock
+ // Using the 2nd key at addr + 1
+ let addr = self as *const _ as usize + 1;
+ let validate = || {
+ let state = self.state.load(Ordering::Relaxed);
+ state & READERS_MASK != 0 && state & WRITER_PARKED_BIT != 0
+ };
+ let before_sleep = || {};
+ let timed_out = |_, _| {};
+ // SAFETY:
+ // * `addr` is an address we control.
+ // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`.
+ // * `before_sleep` does not call `park`, nor does it panic.
+ let park_result = unsafe {
+ parking_lot_core::park(
+ addr,
+ validate,
+ before_sleep,
+ timed_out,
+ TOKEN_EXCLUSIVE,
+ timeout,
+ )
+ };
+ match park_result {
+ // We still need to re-check the state if we are unparked
+ // since a previous writer timing-out could have allowed
+ // another reader to sneak in before we parked.
+ ParkResult::Unparked(_) | ParkResult::Invalid => {
+ state = self.state.load(Ordering::Relaxed);
+ continue;
+ }
+
+ // Timeout expired
+ ParkResult::TimedOut => {
+ // We need to release WRITER_BIT and revert back to
+ // our previous value. We also wake up any threads that
+ // might be waiting on WRITER_BIT.
+ let state = self.state.fetch_add(
+ prev_value.wrapping_sub(WRITER_BIT | WRITER_PARKED_BIT),
+ Ordering::Relaxed,
+ );
+ if state & PARKED_BIT != 0 {
+ let callback = |_, result: UnparkResult| {
+ // Clear the parked bit if there are no more parked threads
+ if !result.have_more_threads {
+ self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
+ }
+ TOKEN_NORMAL
+ };
+ // SAFETY: `callback` does not panic or call any function of `parking_lot`.
+ unsafe {
+ self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback);
+ }
+ }
+ return false;
+ }
 }
 }
+ true
 }
- #[cold]
- #[inline(never)]
- fn upgrade_slow(&self, timeout: Option<Instant>) -> bool {
+ /// Common code for acquiring a lock
+ #[inline]
+ fn lock_common(
+ &self,
+ timeout: Option<Instant>,
+ token: ParkToken,
+ mut try_lock: impl FnMut(&mut usize) -> bool,
+ validate_flags: usize,
+ ) -> bool {
 let mut spinwait = SpinWait::new();
 let mut state = self.state.load(Ordering::Relaxed);
 loop {
- // Grab the lock if it isn't locked, even if there are other
- // threads parked.
- if let Some(new_state) = state.checked_add(EXCLUSIVE_GUARD - UPGRADABLE_GUARD) {
- match self.state.compare_exchange_weak(
- state,
- new_state,
- Ordering::Acquire,
- Ordering::Relaxed,
- ) {
- Ok(_) => return true,
- Err(x) => state = x,
- }
- continue;
+ // Attempt to grab the lock
+ if try_lock(&mut state) {
+ return true;
 }
- // If there are no parked threads and only one other reader, try
- // spinning a few times.
- if state == UPGRADABLE_GUARD | SHARED_GUARD && spinwait.spin() {
+ // If there are no parked threads, try spinning a few times.
+ if state & (PARKED_BIT | WRITER_PARKED_BIT) == 0 && spinwait.spin() { state = self.state.load(Ordering::Relaxed); continue; } - // Park our thread until we are woken up by an unlock - unsafe { - let addr = self as *const _ as usize; - let validate = || { - let mut state = self.state.load(Ordering::Relaxed); - loop { - // If the rwlock is free, abort the park and try to grab - // it immediately. - if state & GUARD_COUNT_MASK == UPGRADABLE_GUARD { - return false; - } + // Set the parked bit + if state & PARKED_BIT == 0 { + if let Err(x) = self.state.compare_exchange_weak( + state, + state | PARKED_BIT, + Ordering::Relaxed, + Ordering::Relaxed, + ) { + state = x; + continue; + } + } - // Set the upgrading and parked bits - match self.state.compare_exchange_weak( - state, - state | (UPGRADING_BIT | PARKED_BIT), - Ordering::Relaxed, - Ordering::Relaxed, - ) { - Ok(_) => return true, - Err(x) => state = x, - } - } - }; - let before_sleep = || {}; - let timed_out = |_, was_last_thread| { - // Clear the upgrading bit - let mut flags = UPGRADING_BIT; - - // Clear the parked bit if we were the last parked thread - if was_last_thread { - flags |= PARKED_BIT; - } + // Park our thread until we are woken up by an unlock + let addr = self as *const _ as usize; + let validate = || { + let state = self.state.load(Ordering::Relaxed); + state & PARKED_BIT != 0 && (state & validate_flags != 0) + }; + let before_sleep = || {}; + let timed_out = |_, was_last_thread| { + // Clear the parked bit if we were the last parked thread + if was_last_thread { + self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); + } + }; - self.state.fetch_and(!flags, Ordering::Relaxed); - }; - match parking_lot_core::park( - addr, - validate, - before_sleep, - timed_out, - TOKEN_UPGRADING, - timeout, - ) { - // The thread that unparked us passed the lock on to us - // directly without unlocking it. - ParkResult::Unparked(TOKEN_HANDOFF) => return true, + // SAFETY: + // * `addr` is an address we control. + // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`. + // * `before_sleep` does not call `park`, nor does it panic. + let park_result = unsafe { + parking_lot_core::park(addr, validate, before_sleep, timed_out, token, timeout) + }; + match park_result { + // The thread that unparked us passed the lock on to us + // directly without unlocking it. 
+ ParkResult::Unparked(TOKEN_HANDOFF) => return true, - // We were unparked normally, try acquiring the lock again - ParkResult::Unparked(_) => (), + // We were unparked normally, try acquiring the lock again + ParkResult::Unparked(_) => (), - // The validation function failed, try locking again - ParkResult::Invalid => (), + // The validation function failed, try locking again + ParkResult::Invalid => (), - // Timeout expired - ParkResult::TimedOut => return false, - } + // Timeout expired + ParkResult::TimedOut => return false, } // Loop back and try locking again @@ -1377,27 +1123,15 @@ } } - #[cold] - #[inline(never)] - fn bump_shared_slow(&self) { - unsafe { deadlock::release_resource(self as *const _ as usize) }; - self.unlock_shared_slow(true); - self.lock_shared(); - } - - #[cold] - #[inline(never)] - fn bump_exclusive_slow(&self) { - unsafe { deadlock::release_resource(self as *const _ as usize) }; - self.unlock_exclusive_slow(true); - self.lock_exclusive(); + #[inline] + fn deadlock_acquire(&self) { + unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + unsafe { deadlock::acquire_resource(self as *const _ as usize + 1) }; } - #[cold] - #[inline(never)] - fn bump_upgradable_slow(&self) { + #[inline] + fn deadlock_release(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; - self.unlock_upgradable_slow(true); - self.lock_upgradable(); + unsafe { deadlock::release_resource(self as *const _ as usize + 1) }; } } diff -Nru rust-parking-lot-0.7.1/src/remutex.rs rust-parking-lot-0.10.0/src/remutex.rs --- rust-parking-lot-0.7.1/src/remutex.rs 2019-01-01 14:02:15.000000000 +0000 +++ rust-parking-lot-0.10.0/src/remutex.rs 2019-11-25 21:09:35.000000000 +0000 @@ -5,8 +5,9 @@ // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. +use crate::raw_mutex::RawMutex; +use core::num::NonZeroUsize; use lock_api::{self, GetThreadId}; -use raw_mutex::RawMutex; /// Implementation of the `GetThreadId` trait for `lock_api::ReentrantMutex`. pub struct RawThreadId; @@ -14,11 +15,15 @@ unsafe impl GetThreadId for RawThreadId { const INIT: RawThreadId = RawThreadId; - fn nonzero_thread_id(&self) -> usize { + fn nonzero_thread_id(&self) -> NonZeroUsize { // The address of a thread-local variable is guaranteed to be unique to the - // current thread, and is also guaranteed to be non-zero. - thread_local!(static KEY: u8 = unsafe { ::std::mem::uninitialized() }); - KEY.with(|x| x as *const _ as usize) + // current thread, and is also guaranteed to be non-zero. The variable has to have a + // non-zero size to guarantee it has a unique address for each thread. 
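// (A zero-sized local would not work for this: all instances of a
// zero-sized type may share a single address, so the key must occupy
// at least one byte.)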
+ thread_local!(static KEY: u8 = 0);
+ KEY.with(|x| {
+ NonZeroUsize::new(x as *const _ as usize)
+ .expect("thread-local variable address is null")
+ })
 }
 }
@@ -54,25 +59,28 @@
 #[cfg(test)]
 mod tests {
+ use crate::ReentrantMutex;
 use std::cell::RefCell;
 use std::sync::Arc;
 use std::thread;
- use ReentrantMutex;
+
+ #[cfg(feature = "serde")]
+ use bincode::{deserialize, serialize};
 #[test]
 fn smoke() {
- let m = ReentrantMutex::new(());
+ let m = ReentrantMutex::new(2);
 {
 let a = m.lock();
 {
 let b = m.lock();
 {
 let c = m.lock();
- assert_eq!(*c, ());
+ assert_eq!(*c, 2);
 }
- assert_eq!(*b, ());
+ assert_eq!(*b, 2);
 }
- assert_eq!(*a, ());
+ assert_eq!(*a, 2);
 }
 }
@@ -113,14 +121,18 @@
 let mutex = ReentrantMutex::new(vec![0u8, 10]);
 assert_eq!(format!("{:?}", mutex), "ReentrantMutex { data: [0, 10] }");
- assert_eq!(
- format!("{:#?}", mutex),
- "ReentrantMutex {
- data: [
- 0,
- 10
- ]
-}"
- );
+ }
+
+ #[cfg(feature = "serde")]
+ #[test]
+ fn test_serde() {
+ let contents: Vec<u8> = vec![0, 1, 2];
+ let mutex = ReentrantMutex::new(contents.clone());
+
+ let serialized = serialize(&mutex).unwrap();
+ let deserialized: ReentrantMutex<Vec<u8>> = deserialize(&serialized).unwrap();
+
+ assert_eq!(*(mutex.lock()), *(deserialized.lock()));
+ assert_eq!(contents, *(deserialized.lock()));
 }
 }
diff -Nru rust-parking-lot-0.7.1/src/rwlock.rs rust-parking-lot-0.10.0/src/rwlock.rs
--- rust-parking-lot-0.7.1/src/rwlock.rs 2019-01-01 14:02:15.000000000 +0000
+++ rust-parking-lot-0.10.0/src/rwlock.rs 2019-11-25 21:09:35.000000000 +0000
@@ -5,8 +5,8 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
+use crate::raw_rwlock::RawRwLock;
 use lock_api;
-use raw_rwlock::RawRwLock;
 /// A reader-writer lock
 ///
@@ -120,14 +120,16 @@
 #[cfg(test)]
 mod tests {
- extern crate rand;
- use self::rand::Rng;
+ use crate::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
+ use rand::Rng;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::mpsc::channel;
 use std::sync::Arc;
 use std::thread;
 use std::time::Duration;
- use {RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
+
+ #[cfg(feature = "serde")]
+ use bincode::{deserialize, serialize};
 #[derive(Eq, PartialEq, Debug)]
 struct NonCopy(i32);
@@ -320,7 +322,7 @@
 fn test_rw_arc_access_in_unwind() {
 let arc = Arc::new(RwLock::new(1));
 let arc2 = arc.clone();
- let _ = thread::spawn(move || -> () {
+ let _ = thread::spawn(move || {
 struct Unwinder {
 i: Arc<RwLock<isize>>,
 }
@@ -534,7 +536,15 @@
 thread::spawn(move || {
 let _lock = arc2.write();
 });
- thread::sleep(Duration::from_millis(100));
+
+ if cfg!(not(all(target_env = "sgx", target_vendor = "fortanix"))) {
+ thread::sleep(Duration::from_millis(100));
+ } else {
+ // FIXME: https://github.com/fortanix/rust-sgx/issues/31
+ for _ in 0..100 {
+ thread::yield_now();
+ }
+ }
 // A normal read would block here since there is a pending writer
 let _lock2 = arc.read_recursive();
@@ -545,17 +555,8 @@
 let x = RwLock::new(vec![0u8, 10]);
 assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }");
- assert_eq!(
- format!("{:#?}", x),
- "RwLock {
- data: [
- 0,
- 10
- ]
-}"
- );
 let _lock = x.write();
- assert_eq!(format!("{:?}", x), "RwLock { <locked> }");
+ assert_eq!(format!("{:?}", x), "RwLock { data: <locked> }");
 }
 #[test]
@@ -565,4 +566,17 @@
 let b = a.clone();
 assert_eq!(Arc::strong_count(&b), 2);
 }
+
+ #[cfg(feature = "serde")]
+ #[test]
+ fn test_serde() {
+ let contents: Vec<u8> = vec![0, 1, 2];
+ let mutex = RwLock::new(contents.clone());
+
+ let
diff -Nru rust-parking-lot-0.7.1/src/rwlock.rs rust-parking-lot-0.10.0/src/rwlock.rs
--- rust-parking-lot-0.7.1/src/rwlock.rs	2019-01-01 14:02:15.000000000 +0000
+++ rust-parking-lot-0.10.0/src/rwlock.rs	2019-11-25 21:09:35.000000000 +0000
@@ -5,8 +5,8 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.

+use crate::raw_rwlock::RawRwLock;
 use lock_api;
-use raw_rwlock::RawRwLock;

 /// A reader-writer lock
 ///
@@ -120,14 +120,16 @@

 #[cfg(test)]
 mod tests {
-    extern crate rand;
-    use self::rand::Rng;
+    use crate::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
+    use rand::Rng;
     use std::sync::atomic::{AtomicUsize, Ordering};
     use std::sync::mpsc::channel;
     use std::sync::Arc;
     use std::thread;
     use std::time::Duration;
-    use {RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
+
+    #[cfg(feature = "serde")]
+    use bincode::{deserialize, serialize};

     #[derive(Eq, PartialEq, Debug)]
     struct NonCopy(i32);
@@ -320,7 +322,7 @@
     fn test_rw_arc_access_in_unwind() {
         let arc = Arc::new(RwLock::new(1));
         let arc2 = arc.clone();
-        let _ = thread::spawn(move || -> () {
+        let _ = thread::spawn(move || {
             struct Unwinder {
                 i: Arc<RwLock<isize>>,
             }
@@ -534,7 +536,15 @@
         thread::spawn(move || {
             let _lock = arc2.write();
         });
-        thread::sleep(Duration::from_millis(100));
+
+        if cfg!(not(all(target_env = "sgx", target_vendor = "fortanix"))) {
+            thread::sleep(Duration::from_millis(100));
+        } else {
+            // FIXME: https://github.com/fortanix/rust-sgx/issues/31
+            for _ in 0..100 {
+                thread::yield_now();
+            }
+        }

         // A normal read would block here since there is a pending writer
         let _lock2 = arc.read_recursive();
@@ -545,17 +555,8 @@
         let x = RwLock::new(vec![0u8, 10]);

         assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }");
-        assert_eq!(
-            format!("{:#?}", x),
-            "RwLock {
-    data: [
-        0,
-        10
-    ]
-}"
-        );
         let _lock = x.write();
-        assert_eq!(format!("{:?}", x), "RwLock { <locked> }");
+        assert_eq!(format!("{:?}", x), "RwLock { data: <locked> }");
     }

     #[test]
@@ -565,4 +566,17 @@
         let b = a.clone();
         assert_eq!(Arc::strong_count(&b), 2);
     }
+
+    #[cfg(feature = "serde")]
+    #[test]
+    fn test_serde() {
+        let contents: Vec<u8> = vec![0, 1, 2];
+        let mutex = RwLock::new(contents.clone());
+
+        let serialized = serialize(&mutex).unwrap();
+        let deserialized: RwLock<Vec<u8>> = deserialize(&serialized).unwrap();
+
+        assert_eq!(*(mutex.read()), *(deserialized.read()));
+        assert_eq!(contents, *(deserialized.read()));
+    }
 }
diff -Nru rust-parking-lot-0.7.1/src/util.rs rust-parking-lot-0.10.0/src/util.rs
--- rust-parking-lot-0.7.1/src/util.rs	2019-01-01 14:02:15.000000000 +0000
+++ rust-parking-lot-0.10.0/src/util.rs	2019-11-09 11:27:25.000000000 +0000
@@ -22,23 +22,17 @@
     }
 }

-// Equivalent to intrinsics::unreachable() in release mode
+// hint::unreachable_unchecked() in release mode
 #[inline]
 unsafe fn unreachable() -> ! {
     if cfg!(debug_assertions) {
         unreachable!();
     } else {
-        enum Void {}
-        match *(1 as *const Void) {}
+        core::hint::unreachable_unchecked()
     }
 }

 #[inline]
 pub fn to_deadline(timeout: Duration) -> Option<Instant> {
-    #[cfg(feature = "nightly")]
-    let deadline = Instant::now().checked_add(timeout);
-    #[cfg(not(feature = "nightly"))]
-    let deadline = Some(Instant::now() + timeout);
-
-    deadline
+    Instant::now().checked_add(timeout)
 }
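The simplified `to_deadline` above relies on `Instant::checked_add` (stable since Rust 1.34, so within the new 1.36 minimum): it returns `None` when the requested timeout is too large to represent as an absolute deadline, which callers can treat as "no deadline". A self-contained sketch of the pattern:

    use std::time::{Duration, Instant};

    // Convert a relative timeout into an absolute deadline. `None` signals
    // that the addition overflowed the platform's time representation, i.e.
    // the timeout is effectively infinite.
    fn to_deadline(timeout: Duration) -> Option<Instant> {
        Instant::now().checked_add(timeout)
    }

    fn main() {
        assert!(to_deadline(Duration::from_secs(60)).is_some());
        // An absurdly large timeout overflows on typical platforms.
        assert!(to_deadline(Duration::from_secs(u64::max_value())).is_none());
    }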
diff -Nru rust-parking-lot-0.7.1/.travis.yml rust-parking-lot-0.10.0/.travis.yml
--- rust-parking-lot-0.7.1/.travis.yml	2019-01-01 14:02:15.000000000 +0000
+++ rust-parking-lot-0.10.0/.travis.yml	2019-11-09 11:27:25.000000000 +0000
@@ -1,39 +1,84 @@
 language: rust
 sudo: false

-rust:
-- 1.24.0
-- 1.26.2
-- stable
-- beta
-- nightly
-
-before_script:
-- |
-  pip install 'travis-cargo<0.2' --user &&
-  export PATH=$HOME/.local/bin:$PATH
-
-script:
-- cd core;
-- travis-cargo build;
-- cd ../lock_api;
-- travis-cargo build;
-- cd ..;
-- travis-cargo build
-- travis-cargo test
-- travis-cargo --only stable test -- --features=deadlock_detection
-- travis-cargo --only beta test -- --features=deadlock_detection
-- travis-cargo --only nightly doc -- --all-features --no-deps -p parking_lot -p parking_lot_core -p lock_api
-- cd benchmark
-- travis-cargo build
-- cargo run --release --bin mutex -- 2 1 0 1 2
-- cargo run --release --bin rwlock -- 1 1 1 0 1 2
-- cd ..
-
 env:
   global:
-  - TRAVIS_CARGO_NIGHTLY_FEATURE=nightly
-  - RUST_TEST_THREADS=1
+  - RUST_TEST_THREADS=1
+
+matrix:
+  include:
+    - rust: 1.36.0
+      os: linux
+      script:
+        - cargo build --lib
+        - cargo build --lib --features serde
+        # Build on other platforms
+        - rustup target add wasm32-unknown-unknown
+        - cargo build --lib --target wasm32-unknown-unknown
+
+    - rust: 1.36.0
+      os: linux
+      script: &script
+        - cargo build
+        - cargo test --all
+        - cargo build --features serde
+        - cargo test --features serde
+        - cargo test --features=deadlock_detection
+        # Test other platforms
+        - rustup target add wasm32-unknown-unknown
+        - cargo build --all --target wasm32-unknown-unknown
+
+    - rust: stable
+      os: linux
+      script: *script
+
+    - rust: beta
+      os: linux
+      script: *script
+
+    - rust: nightly
+      os: linux
+      script:
+        - cargo build --features nightly
+        - cargo test --all --features nightly
+        - cargo build --features serde,nightly
+        - cargo test --all --features serde,nightly
+        # Test other platforms
+        - rustup target add x86_64-fortanix-unknown-sgx
+        - rustup target add x86_64-unknown-redox
+        - rustup target add x86_64-unknown-cloudabi
+        - rustup target add x86_64-linux-android
+        - cargo test --all --no-run --target x86_64-fortanix-unknown-sgx --features nightly
+        - cargo build --all --target x86_64-unknown-redox --features nightly
+        - cargo build --all --target x86_64-unknown-cloudabi --features nightly
+        - cargo build --all --target x86_64-linux-android --features nightly
+        # Test building the docs
+        - cargo doc --all-features --no-deps -p parking_lot -p parking_lot_core -p lock_api
+        # Run the benchmarks
+        - cd benchmark
+        - cargo run --release --bin mutex -- 2 1 0 1 2
+        - cargo run --release --bin rwlock -- 1 1 1 0 1 2
+        - cd ..
+
+    - rust: 1.36.0
+      os: osx
+      script: *script
+
+    - rust: stable
+      os: osx
+      script: *script
+
+    - rust: beta
+      os: osx
+      script: *script
+
+    - rust: nightly
+      os: osx
+      script:
+        - cargo build --features nightly
+        - cargo test --all --features nightly
+        - cargo build --features serde,nightly
+        - cargo test --all --features serde,nightly

 notifications:
   email: false
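The `serde` feature exercised by the CI jobs above is forwarded to lock_api, whose lock types serialize as their protected contents. A hypothetical downstream crate would enable it with `parking_lot = { version = "0.10", features = ["serde"] }` plus bincode 1.x, and could then round-trip a lock with any serde format, mirroring the tests above:

    use parking_lot::RwLock;

    fn main() {
        let lock = RwLock::new(vec![0u8, 1, 2]);

        // Serialization takes the lock and writes out only the protected data;
        // deserialization rebuilds a fresh, unlocked RwLock around it.
        let bytes = bincode::serialize(&lock).unwrap();
        let restored: RwLock<Vec<u8>> = bincode::deserialize(&bytes).unwrap();

        assert_eq!(*lock.read(), *restored.read());
    }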